Merge branch 'master' into hanfei/disable-shared-set-join

Han Fei 2024-11-22 18:16:45 +01:00 committed by GitHub
commit 34bf8d3b2c
3552 changed files with 373234 additions and 11216 deletions


@ -16,6 +16,9 @@ Checks: [
'-android-*',
'-boost-use-ranges',
'-modernize-use-ranges',
'-bugprone-assignment-in-if-condition',
'-bugprone-branch-clone',
'-bugprone-easily-swappable-parameters',
@ -28,7 +31,6 @@ Checks: [
'-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
'-bugprone-unchecked-optional-access',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-cert-dcl16-c',
'-cert-dcl37-c',
@ -42,6 +44,8 @@ Checks: [
'-clang-analyzer-optin.performance.Padding',
'-clang-analyzer-cplusplus.PlacementNew',
'-clang-analyzer-unix.Malloc',
'-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow
@ -90,6 +94,7 @@ Checks: [
'-misc-non-private-member-variables-in-classes',
'-misc-confusable-identifiers', # useful but slooow
'-misc-use-anonymous-namespace',
'-misc-use-internal-linkage',
'-modernize-avoid-c-arrays',
'-modernize-concat-nested-namespaces',
@ -137,6 +142,7 @@ Checks: [
'-readability-suspicious-call-argument',
'-readability-uppercase-literal-suffix',
'-readability-use-anyofallof',
'-readability-math-missing-parentheses',
'-zircon-*'
]


@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)


@ -25,6 +25,11 @@ env:
required: false
default: false
type: boolean
only-docker:
description: 'Run only docker builds (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
@ -45,6 +50,11 @@ env:
required: false
default: false
type: boolean
only-docker:
description: 'Run only docker builds (repo-recovery, tests)'
required: false
default: false
type: boolean
dry-run:
description: 'Dry run'
required: false
@ -69,13 +79,13 @@ jobs:
- name: Prepare Release Info
shell: bash
run: |
if [ ${{ inputs.only-repo }} == "true" ]; then
git tag -l ${{ inputs.ref }} || { echo "With only-repo option ref must be a valid release tag"; exit 1; }
if [ ${{ inputs.only-repo }} == "true" ] || [ ${{ inputs.only-docker }} == "true" ]; then
git tag -l ${{ inputs.ref }} || { echo "With only-repo/docker option ref must be a valid release tag"; exit 1; }
fi
python3 ./tests/ci/create_release.py --prepare-release-info \
--ref ${{ inputs.ref }} --release-type ${{ inputs.type }} \
${{ inputs.dry-run == true && '--dry-run' || '' }} \
${{ inputs.only-repo == true && '--skip-tag-check' || '' }}
${{ (inputs.only-repo == true || inputs.only-docker == true) && '--skip-tag-check' || '' }}
echo "::group::Release Info"
python3 -m json.tool /tmp/release_info.json
echo "::endgroup::"
@ -87,31 +97,33 @@ jobs:
echo "COMMIT_SHA=$commit_sha" >> "$GITHUB_ENV"
if [ "$is_latest" == "true" ]; then
echo "DOCKER_TAG_TYPE=release-latest" >> "$GITHUB_ENV"
echo "IS_LATEST=1" >> "$GITHUB_ENV"
else
echo "DOCKER_TAG_TYPE=release" >> "$GITHUB_ENV"
echo "IS_LATEST=0" >> "$GITHUB_ENV"
fi
- name: Download All Release Artifacts
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --download-packages ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push Git Tag for the Release
if: ${{ ! inputs.only-repo }}
if: ${{ ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-release-tag ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Push New Release Branch
if: ${{ inputs.type == 'new' && ! inputs.only-repo }}
if: ${{ inputs.type == 'new' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --push-new-release-branch ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump CH Version and Update Contributors' List
if: ${{ ! inputs.only-repo }}
if: ${{ ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-bump-version-pr ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Bump Docker versions, Changelog, Security
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --set-progress-started --progress "update changelog, docker version, security"
@ -135,7 +147,7 @@ jobs:
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.dry-run && ! inputs.only-repo && ! inputs.only-docker }}
uses: peter-evans/create-pull-request@v6
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
@ -153,65 +165,176 @@ jobs:
### Changelog category (leave one):
- Not for changelog (changelog entry is not required)
- name: Complete previous steps and Restore git state
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
git reset --hard HEAD
git checkout "$GITHUB_REF_NAME"
python3 ./tests/ci/create_release.py --set-progress-completed
- name: Create GH Release
if: ${{ inputs.type == 'patch' && ! inputs.only-repo }}
if: ${{ inputs.type == 'patch' && ! inputs.only-repo && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/create_release.py --create-gh-release ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export TGZ Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test TGZ Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-tgz ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export RPM Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test RPM Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-rpm ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Export Debian Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --export-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Test Debian Packages
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && ! inputs.only-docker }}
shell: bash
run: |
python3 ./tests/ci/artifactory.py --test-debian ${{ inputs.dry-run == true && '--dry-run' || '' }}
- name: Docker clickhouse/clickhouse-server building
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && inputs.dry-run != true }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker server release"
export CHECK_NAME="Docker server image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
export DOCKER_IMAGE="clickhouse/clickhouse-server"
# We must use docker file from the release commit
git checkout "${{ env.RELEASE_TAG }}"
python3 ./version_helper.py --export > /tmp/version.sh
. /tmp/version.sh
if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING"
else
echo "Invalid version string: $CLICKHOUSE_VERSION_STRING"
exit 1
fi
CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*}
CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*}
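# "%.*" strips the shortest trailing ".<component>" match, so a hypothetical CLICKHOUSE_VERSION_STRING of 24.11.1.2557 yields CLICKHOUSE_VERSION_MINOR=24.11 and CLICKHOUSE_VERSION_MAJOR=24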
# Define build configurations
configs=(
"ubuntu:../../docker/server/Dockerfile.ubuntu"
"alpine:../../docker/server/Dockerfile.alpine"
)
for config in "${configs[@]}"; do
# Split the config into variant and Dockerfile path
variant=${config%%:*}
dockerfile=${config##*:}
VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant")
LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
TAGS=(
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}"
)
if [ "$IS_LATEST" = "1" ]; then
TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}")
fi
echo "Following tags will be created: ${TAGS[*]}"
# shellcheck disable=SC2086,SC2048
docker buildx build \
--platform=linux/amd64,linux/arm64 \
--output=type=registry \
--label=com.clickhouse.build.version="$LABEL_VERSION" \
${TAGS[*]} \
--build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \
--progress=plain \
--file="$dockerfile" \
../../docker/server
done
git checkout -
python3 ./create_release.py --set-progress-completed
- name: Docker clickhouse/clickhouse-keeper building
if: ${{ inputs.type == 'patch' }}
if: ${{ inputs.type == 'patch' && inputs.dry-run != true }}
shell: bash
run: |
cd "./tests/ci"
python3 ./create_release.py --set-progress-started --progress "docker keeper release"
export CHECK_NAME="Docker keeper image"
python3 docker_server.py --tag-type ${{ env.DOCKER_TAG_TYPE }} --version ${{ env.RELEASE_TAG }} --check-name "$CHECK_NAME" --sha ${{ env.COMMIT_SHA }} ${{ ! inputs.dry-run && '--push' || '' }}
export DOCKER_IMAGE="clickhouse/clickhouse-keeper"
# We must use docker file from the release commit
git checkout "${{ env.RELEASE_TAG }}"
python3 ./version_helper.py --export > /tmp/version.sh
. /tmp/version.sh
if [[ $CLICKHOUSE_VERSION_STRING =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "ClickHouse version: $CLICKHOUSE_VERSION_STRING"
else
echo "Invalid version string: $CLICKHOUSE_VERSION_STRING"
exit 1
fi
CLICKHOUSE_VERSION_MINOR=${CLICKHOUSE_VERSION_STRING%.*}
CLICKHOUSE_VERSION_MAJOR=${CLICKHOUSE_VERSION_MINOR%.*}
# Define build configurations
configs=(
"ubuntu:../../docker/keeper/Dockerfile.ubuntu"
"alpine:../../docker/keeper/Dockerfile.alpine"
)
for config in "${configs[@]}"; do
# Split the config into variant and Dockerfile path
variant=${config%%:*}
dockerfile=${config##*:}
VERSION_SUFFIX=$([ "$variant" = "ubuntu" ] && echo "" || echo "-$variant")
LABEL_VERSION="${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
TAGS=(
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_STRING}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MINOR}${VERSION_SUFFIX}"
"--tag=${DOCKER_IMAGE}:${CLICKHOUSE_VERSION_MAJOR}${VERSION_SUFFIX}"
)
if [ "$IS_LATEST" = "1" ]; then
TAGS+=("--tag=${DOCKER_IMAGE}:latest${VERSION_SUFFIX}")
fi
echo "Following tags will be created: ${TAGS[*]}"
# shellcheck disable=SC2086,SC2048
docker buildx build \
--platform=linux/amd64,linux/arm64 \
--output=type=registry \
--label=com.clickhouse.build.version="$LABEL_VERSION" \
${TAGS[*]} \
--build-arg=VERSION="$CLICKHOUSE_VERSION_STRING" \
--progress=plain \
--file="$dockerfile" \
../../docker/keeper
done
git checkout -
python3 ./create_release.py --set-progress-completed
# check out back if previous steps failed
- name: Checkout back
if: ${{ ! cancelled() }}
shell: bash
run: |
git checkout ${{ github.ref }}
- name: Update release info. Merge created PRs
shell: bash
run: |


@ -58,13 +58,8 @@ jobs:
test_name: Style check
runner_type: style-checker-aarch64
run_command: |
python3 style_check.py
python3 style_check.py --no-push
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}


@ -27,7 +27,7 @@ jobs:
id: runconfig
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --outfile ${{ runner.temp }}/ci_run_data.json
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow NightlyBuilds --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
@ -44,9 +44,39 @@ jobs:
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true
Builds_1:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
uses: ./.github/workflows/reusable_build_stage.yml
with:
stage: Builds_1
data: ${{ needs.RunConfig.outputs.data }}
Tests_1:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_1
data: ${{ needs.RunConfig.outputs.data }}
Builds_2:
needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
uses: ./.github/workflows/reusable_build_stage.yml
with:
stage: Builds_2
data: ${{ needs.RunConfig.outputs.data }}
Tests_2:
needs: [RunConfig, Builds_1, Tests_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_2
data: ${{ needs.RunConfig.outputs.data }}
CheckWorkflow:
if: ${{ !cancelled() }}
needs: [RunConfig, BuildDockers]
needs: [RunConfig, BuildDockers, Tests_2]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code


@ -79,10 +79,7 @@ jobs:
python3 style_check.py
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
robot_git_token: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
FastTest:
needs: [RunConfig, BuildDockers, StyleCheck]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}


@ -34,8 +34,11 @@ name: Build ClickHouse
description: additional ENV variables to setup the job
type: string
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -58,10 +61,18 @@ jobs:
run: |
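# Note: "$GITHUB_ENV" accepts multi-line values via NAME<<DELIMITER ... DELIMITER blocks, the same syntax GitHub documents for environment files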
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build


@ -18,8 +18,11 @@ name: BuildStageWF
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -39,4 +42,6 @@ jobs:
checkout_depth: 0
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
robot_git_token: ${{ secrets.robot_git_token }}
ci_db_url: ${{ secrets.ci_db_url }}
ci_db_password: ${{ secrets.ci_db_password }}


@ -45,8 +45,11 @@ name: Simple job
type: boolean
default: false
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
@ -77,7 +80,15 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
- name: Common setup
uses: ./.github/actions/common_setup


@ -40,8 +40,11 @@ name: Testing workflow
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
@ -75,10 +78,18 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
- name: Common setup
uses: ./.github/actions/common_setup


@ -15,8 +15,11 @@ name: StageWF
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -32,4 +35,6 @@ jobs:
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
robot_git_token: ${{ secrets.robot_git_token }}
ci_db_url: ${{ secrets.ci_db_url }}
ci_db_password: ${{ secrets.ci_db_password }}

.gitmodules (vendored, 6 lines changed)

@ -1,6 +1,9 @@
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
# more confusing than useful.
[submodule "contrib/jwt-cpp"]
path = contrib/jwt-cpp
url = https://github.com/Thalhammer/jwt-cpp
[submodule "contrib/zstd"]
path = contrib/zstd
url = https://github.com/facebook/zstd
@ -348,6 +351,9 @@
[submodule "contrib/idna"]
path = contrib/idna
url = https://github.com/ada-url/idna.git
[submodule "contrib/google-cloud-cpp"]
path = contrib/google-cloud-cpp
url = https://github.com/ClickHouse/google-cloud-cpp.git
[submodule "contrib/rust_vendor"]
path = contrib/rust_vendor
url = https://github.com/ClickHouse/rust_vendor.git


@ -1,4 +1,5 @@
### Table of Contents
**[ClickHouse release v24.11, 2024-11-26](#2411)**<br/>
**[ClickHouse release v24.10, 2024-10-31](#2410)**<br/>
**[ClickHouse release v24.9, 2024-09-26](#249)**<br/>
**[ClickHouse release v24.8 LTS, 2024-08-20](#248)**<br/>
@ -13,6 +14,95 @@
# 2024 Changelog
### <a id="2411"></a> ClickHouse release 24.11, 2024-11-26
#### Backward Incompatible Change
* Remove system tables `generate_series` and `generateSeries`. They were added by mistake here: [#59390](https://github.com/ClickHouse/ClickHouse/issues/59390). [#71091](https://github.com/ClickHouse/ClickHouse/pull/71091) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove `StorageExternalDistributed`. Closes [#70600](https://github.com/ClickHouse/ClickHouse/issues/70600).[#71176](https://github.com/ClickHouse/ClickHouse/pull/71176) ([flynn](https://github.com/ucasfl)).
* The table engines Kafka, NATS and RabbitMQ are now covered by their own grants in the `SOURCES` hierarchy. Add grants to any non-default database users that create tables with these engine types. [#71250](https://github.com/ClickHouse/ClickHouse/pull/71250) ([Christoph Wurm](https://github.com/cwurm)).
* Check the full mutation query before executing it (including subqueries). This prevents accidentally running an invalid query and building up dead mutations that block valid mutations. [#71300](https://github.com/ClickHouse/ClickHouse/pull/71300) ([Christoph Wurm](https://github.com/cwurm)).
* Rename filesystem cache setting `skip_download_if_exceeds_query_cache` to `filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit`. [#71578](https://github.com/ClickHouse/ClickHouse/pull/71578) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove support for `Enum` as well as `UInt128` and `UInt256` arguments in `deltaSumTimestamp`. Remove support for `Int8`, `UInt8`, `Int16`, and `UInt16` of the second ("timestamp") argument of `deltaSumTimestamp`. [#71790](https://github.com/ClickHouse/ClickHouse/pull/71790) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When retrieving data directly from a dictionary using Dictionary storage, dictionary table function, or direct SELECT from the dictionary itself, it is now enough to have `SELECT` permission or `dictGet` permission for the dictionary. This aligns with previous attempts to prevent ACL bypasses: https://github.com/ClickHouse/ClickHouse/pull/57362 and https://github.com/ClickHouse/ClickHouse/pull/65359. It also makes the latter one backward compatible. [#72051](https://github.com/ClickHouse/ClickHouse/pull/72051) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Experimental Feature
* Implement `allowed_feature_tier` as a global switch to disable all experimental / beta features. [#71841](https://github.com/ClickHouse/ClickHouse/pull/71841) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)).
* Support alter from String to JSON. This PR also changes the serialization of JSON and Dynamic types to the new version V2. The old version V1 can still be used by enabling the setting `merge_tree_use_v1_object_and_dynamic_serialization` (useful during an upgrade, to be able to roll back without issues). [#70442](https://github.com/ClickHouse/ClickHouse/pull/70442) ([Pavel Kruglov](https://github.com/Avogar)).
* Implement simple CAST from Map/Tuple/Object to new JSON through serialization/deserialization from JSON string. [#71320](https://github.com/ClickHouse/ClickHouse/pull/71320) ([Pavel Kruglov](https://github.com/Avogar)).
* Don't allow Variant/Dynamic types in ORDER BY/GROUP BY/PARTITION BY/PRIMARY KEY by default because it may lead to unexpected results. [#69731](https://github.com/ClickHouse/ClickHouse/pull/69731) ([Pavel Kruglov](https://github.com/Avogar)).
* Forbid Dynamic/Variant types in min/max functions to avoid confusion. [#71761](https://github.com/ClickHouse/ClickHouse/pull/71761) ([Pavel Kruglov](https://github.com/Avogar)).
#### New Feature
* A new data type, `BFloat16`, represents 16-bit floating point numbers with 8-bit exponent, sign, and 7-bit mantissa. This closes [#44206](https://github.com/ClickHouse/ClickHouse/issues/44206). This closes [#49937](https://github.com/ClickHouse/ClickHouse/issues/49937). [#64712](https://github.com/ClickHouse/ClickHouse/pull/64712) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `CHECK GRANT` query to check whether the current user/role has been granted the specific privilege and whether the corresponding table/column exists in the memory. [#68885](https://github.com/ClickHouse/ClickHouse/pull/68885) ([Unalian](https://github.com/Unalian)).
* Added SQL syntax to describe workload and resource management. https://clickhouse.com/docs/en/operations/workload-scheduling. [#69187](https://github.com/ClickHouse/ClickHouse/pull/69187) ([Sergei Trifonov](https://github.com/serxa)).
* Added server setting `async_load_system_database` that allows the server to start before the system database is fully loaded. This helps to start ClickHouse faster if there are many system tables. [#69847](https://github.com/ClickHouse/ClickHouse/pull/69847) ([Sergei Trifonov](https://github.com/serxa)).
* Allow each authentication method to have its own expiration date, remove from user entity. [#70090](https://github.com/ClickHouse/ClickHouse/pull/70090) ([Arthur Passos](https://github.com/arthurpassos)).
* Push external user roles from the query originator to other nodes in the cluster. Helpful when only the originator has access to the external authenticator (like LDAP). [#70332](https://github.com/ClickHouse/ClickHouse/pull/70332) ([Andrey Zvonov](https://github.com/zvonand)).
* Added a new header type for S3 endpoints for user authentication (`access_header`). This allows specifying an access header with the lowest priority, which will be overwritten with `access_key_id` from any other source (for example, a table schema or a named collection). [#71011](https://github.com/ClickHouse/ClickHouse/pull/71011) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Initial implementation of settings tiers. [#71145](https://github.com/ClickHouse/ClickHouse/pull/71145) ([Raúl Marín](https://github.com/Algunenano)).
* Add support for staleness clause in order by with fill operator. [#71151](https://github.com/ClickHouse/ClickHouse/pull/71151) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Added aliases `anyRespectNulls`, `firstValueRespectNulls`, and `anyValueRespectNulls` for aggregation function `any`. Also added aliases `anyLastRespectNulls` and `lastValueRespectNulls` for aggregation function `anyLast`. This allows using more natural camel-case-only syntax rather than mixed camel-case/underscore syntax, for example: `SELECT anyLastRespectNullsStateIf` instead of `anyLast_respect_nullsStateIf`. [#71403](https://github.com/ClickHouse/ClickHouse/pull/71403) ([Peter Nguyen](https://github.com/petern48)).
* Added the configuration `date_time_utc` parameter, enabling JSON log formatting to support UTC date-time in RFC 3339/ISO8601 format. [#71560](https://github.com/ClickHouse/ClickHouse/pull/71560) ([Ali](https://github.com/xogoodnow)).
* Optimized memory usage for values of index granularity if granularity is constant for part. Added an ability to always select constant granularity for part (setting `use_const_adaptive_granularity`), which helps to ensure that it is always optimized in memory. It helps in large workloads (trillions of rows in shared storage) to avoid constantly growing memory usage by metadata (values of index granularity) of data parts. [#71786](https://github.com/ClickHouse/ClickHouse/pull/71786) ([Anton Popov](https://github.com/CurtizJ)).
* Add `iceberg[S3;HDFS;Azure]Cluster`, `deltaLakeCluster`, `hudiCluster` table functions. [#72045](https://github.com/ClickHouse/ClickHouse/pull/72045) ([Mikhail Artemenko](https://github.com/Michicosun)).
#### Performance Improvement
* Input block columns are no longer copied for `join_algorithm='parallel_hash'` when distributing them between threads for parallel processing. [#67782](https://github.com/ClickHouse/ClickHouse/pull/67782) ([Nikita Taranov](https://github.com/nickitat)).
* Optimized the `Replacing` merge algorithm for non-intersecting parts. [#70977](https://github.com/ClickHouse/ClickHouse/pull/70977) ([Anton Popov](https://github.com/CurtizJ)).
* Do not list detached parts from readonly and write-once disks for metrics and system.detached_parts. [#71086](https://github.com/ClickHouse/ClickHouse/pull/71086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not calculate heavy asynchronous metrics by default. The feature was introduced in [#40332](https://github.com/ClickHouse/ClickHouse/issues/40332), but it isn't good to have a heavy background job that is needed for only a single customer. [#71087](https://github.com/ClickHouse/ClickHouse/pull/71087) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve the performance and accuracy of system.query_metric_log collection interval by reducing the critical region. [#71473](https://github.com/ClickHouse/ClickHouse/pull/71473) ([Pablo Marcos](https://github.com/pamarcos)).
#### Improvement
* Higher-order functions with constant arrays and constant captured arguments will return constants. [#58400](https://github.com/ClickHouse/ClickHouse/pull/58400) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Read-in-order optimization via generating virtual rows, so that less data is read during merge sort; this is especially useful when multiple parts exist. [#62125](https://github.com/ClickHouse/ClickHouse/pull/62125) ([Shichao Jin](https://github.com/jsc0218)).
* Query plan step names (`EXPLAIN PLAN json=1`) and pipeline processor names (`EXPLAIN PIPELINE compact=0,graph=1`) now have a unique id as a suffix. This allows matching processor profiler output and OpenTelemetry traces with explain output. [#63518](https://github.com/ClickHouse/ClickHouse/pull/63518) ([qhsong](https://github.com/qhsong)).
* Added an option to check that an object exists after writing to Azure Blob Storage, controlled by the setting `check_objects_after_upload`. [#64847](https://github.com/ClickHouse/ClickHouse/pull/64847) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
* Use `Atomic` database by default in `clickhouse-local`. Address items 1 and 5 from [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). Closes [#44817](https://github.com/ClickHouse/ClickHouse/issues/44817). [#68024](https://github.com/ClickHouse/ClickHouse/pull/68024) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Exceptions now break the HTTP protocol in order to alert the client about an error. [#68800](https://github.com/ClickHouse/ClickHouse/pull/68800) ([Sema Checherinda](https://github.com/CheSema)).
* Report running DDLWorker hosts by creating replica_dir and mark replicas active in DDLWorker. [#69658](https://github.com/ClickHouse/ClickHouse/pull/69658) ([tuanpach](https://github.com/tuanpach)).
* Wait only on active replicas for database ON CLUSTER queries if distributed_ddl_output_mode is set to be *_only_active. [#69660](https://github.com/ClickHouse/ClickHouse/pull/69660) ([tuanpach](https://github.com/tuanpach)).
* Better error-handling and cancellation of `ON CLUSTER` backups and restores: - If a backup or restore fails on one host then it'll be cancelled on other hosts automatically - No weird errors must be produced because some hosts failed while other hosts continued their work - If a backup or restore is cancelled on one host then it'll be cancelled on other hosts automatically - Fix issues with `test_disallow_concurrency` - now disabling of concurrency must work better - Backups and restores now are much more resistant to ZooKeeper disconnects. [#70027](https://github.com/ClickHouse/ClickHouse/pull/70027) ([Vitaly Baranov](https://github.com/vitlibar)).
* Enable `parallel_replicas_local_plan` by default. Building a full-fledged local plan on the query initiator improves parallel replicas performance with less resource consumption, provides opportunities to apply more query optimizations. [#70171](https://github.com/ClickHouse/ClickHouse/pull/70171) ([Igor Nikonov](https://github.com/devcrafter)).
* Add ability to set user/password in http_handlers (for `dynamic_query_handler`/`predefined_query_handler`). [#70725](https://github.com/ClickHouse/ClickHouse/pull/70725) ([Azat Khuzhin](https://github.com/azat)).
* Support `ALTER TABLE ... MODIFY/RESET SETTING ...` for certain settings in storage S3Queue. [#70811](https://github.com/ClickHouse/ClickHouse/pull/70811) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in the memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)).
* Add a `--threads` parameter to `clickhouse-compressor`, which allows compressing data in parallel. [#70860](https://github.com/ClickHouse/ClickHouse/pull/70860) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added the ability to reload client certificates in the same way as the procedure for reloading server certificates. [#70997](https://github.com/ClickHouse/ClickHouse/pull/70997) ([Roman Antonov](https://github.com/Romeo58rus)).
* Refactored internal structure of files which work with DataLake Storages. [#71012](https://github.com/ClickHouse/ClickHouse/pull/71012) ([Daniil Ivanik](https://github.com/divanik)).
* Make the Replxx client history size configurable. [#71014](https://github.com/ClickHouse/ClickHouse/pull/71014) ([Jiří Kozlovský](https://github.com/jirislav)).
* Added a setting `prewarm_mark_cache` which enables loading of marks to mark cache on inserts, merges, fetches of parts and on startup of the table. [#71053](https://github.com/ClickHouse/ClickHouse/pull/71053) ([Anton Popov](https://github.com/CurtizJ)).
* Boolean support for parquet native reader. [#71055](https://github.com/ClickHouse/ClickHouse/pull/71055) ([Arthur Passos](https://github.com/arthurpassos)).
* Retry more errors when interacting with S3, such as "Malformed message". [#71088](https://github.com/ClickHouse/ClickHouse/pull/71088) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Lower log level for some messages about S3. [#71090](https://github.com/ClickHouse/ClickHouse/pull/71090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Support writing HDFS files with spaces in the path. [#71105](https://github.com/ClickHouse/ClickHouse/pull/71105) ([exmy](https://github.com/exmy)).
* Added settings limiting the number of replicated tables, dictionaries and views. [#71179](https://github.com/ClickHouse/ClickHouse/pull/71179) ([Kirill](https://github.com/kirillgarbar)).
* Use `AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE` instead of `AWS_CONTAINER_AUTHORIZATION_TOKEN` if the former is available. Fixes [#71074](https://github.com/ClickHouse/ClickHouse/issues/71074). [#71269](https://github.com/ClickHouse/ClickHouse/pull/71269) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Remove the metadata_version ZooKeeper node creation from RMT restarting thread. The only scenario where we need to create this node is when the user updated from a version earlier than 20.4 straight to one later than 24.10. ClickHouse does not support upgrades that span more than a year, so we should throw an exception and ask the user to update gradually, instead of creating the node. [#71385](https://github.com/ClickHouse/ClickHouse/pull/71385) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Add per host dashboards `Overview (host)` and `Cloud overview (host)` to advanced dashboard. [#71422](https://github.com/ClickHouse/ClickHouse/pull/71422) ([alesapin](https://github.com/alesapin)).
* The methods `removeObject` and `removeObjects` are not idempotent. When retries happen due to network errors, the result could be `object not found` because it has been deleted at previous attempts. [#71529](https://github.com/ClickHouse/ClickHouse/pull/71529) ([Sema Checherinda](https://github.com/CheSema)).
* Added new functions `parseDateTime64`, `parseDateTime64OrNull` and `parseDateTime64OrZero`. Compared to the existing function `parseDateTime` (and variants), they return a value of type `DateTime64` instead of `DateTime`. [#71581](https://github.com/ClickHouse/ClickHouse/pull/71581) ([kevinyhzou](https://github.com/KevinyhZou)).
* Allow using clickhouse with a file argument as --queries-file. [#71589](https://github.com/ClickHouse/ClickHouse/pull/71589) ([Raúl Marín](https://github.com/Algunenano)).
* Shrink the index_granularity array in memory to fit, reducing the memory footprint of the MergeTree table engine family. [#71595](https://github.com/ClickHouse/ClickHouse/pull/71595) ([alesapin](https://github.com/alesapin)).
* `clickhouse-local` uses an implicit SELECT by default, which allows using it as a calculator. Improve the syntax highlighting for the implicit SELECT mode. [#71620](https://github.com/ClickHouse/ClickHouse/pull/71620) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The command line applications will highlight syntax even for multi-statements. [#71622](https://github.com/ClickHouse/ClickHouse/pull/71622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Command-line applications will return non-zero exit codes on errors. In previous versions, the `disks` application returned zero on errors, and other applications returned zero for errors 256 (`PARTITION_ALREADY_EXISTS`) and 512 (`SET_NON_GRANTED_ROLE`). [#71623](https://github.com/ClickHouse/ClickHouse/pull/71623) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When the user/group is given as an ID, `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* The `Vertical` format (which is also activated when you end your query with `\G`) gets the features of Pretty formats, such as: - highlighting thousand groups in numbers; - printing a readable number tip. [#71630](https://github.com/ClickHouse/ClickHouse/pull/71630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow to disable memory buffer increase for filesystem cache via setting `filesystem_cache_prefer_bigger_buffer_size`. [#71640](https://github.com/ClickHouse/ClickHouse/pull/71640) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a separate setting `background_download_max_file_segment_size` for background download max file segment size in filesystem cache. [#71648](https://github.com/ClickHouse/ClickHouse/pull/71648) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Changes the default value of `enable_http_compression` from 0 to 1. Closes [#71591](https://github.com/ClickHouse/ClickHouse/issues/71591). [#71774](https://github.com/ClickHouse/ClickHouse/pull/71774) ([Peter Nguyen](https://github.com/petern48)).
* Slightly better JSON type parsing: if the current block for the JSON path contains values of several types, try to choose the best type by trying types in a special best-effort order. [#71785](https://github.com/ClickHouse/ClickHouse/pull/71785) ([Pavel Kruglov](https://github.com/Avogar)).
* Previously, reading from `system.asynchronous_metrics` would wait for a concurrent update to finish. This can take a long time if the system is under heavy load. With this change, the previously collected values can always be read. [#71798](https://github.com/ClickHouse/ClickHouse/pull/71798) ([Alexander Gololobov](https://github.com/davenger)).
* Set `polling_max_timeout_ms` to 10 minutes, `polling_backoff_ms` to 30 seconds. [#71817](https://github.com/ClickHouse/ClickHouse/pull/71817) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Turn off the filesystem cache setting `boundary_alignment` for non-disk reads. [#71827](https://github.com/ClickHouse/ClickHouse/pull/71827) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
* Queries like 'SELECT * FROM t LIMIT 1' used to load part indexes even though they were not used. [#71866](https://github.com/ClickHouse/ClickHouse/pull/71866) ([Alexander Gololobov](https://github.com/davenger)).
* `allow_reorder_prewhere_conditions` is on by default with old compatibility settings. [#71867](https://github.com/ClickHouse/ClickHouse/pull/71867) ([Raúl Marín](https://github.com/Algunenano)).
* Added a dropdown selector to the advanced dashboard HTML page for choosing a dashboard from the `system.dashboards` table. [#72081](https://github.com/ClickHouse/ClickHouse/pull/72081) ([Sergei Trifonov](https://github.com/serxa)).
### <a id="2410"></a> ClickHouse release 24.10, 2024-10-31
#### Backward Incompatible Change
@ -344,7 +434,7 @@
* The system table `text_log` is enabled by default. This is fully compatible with previous versions, but you may notice subtly increased disk usage on the local disk (this system table takes a tiny amount of disk space). [#67428](https://github.com/ClickHouse/ClickHouse/pull/67428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In previous versions, `arrayWithConstant` can be slow if asked to generate very large arrays. In the new version, it is limited to 1 GB per array. This closes [#32754](https://github.com/ClickHouse/ClickHouse/issues/32754). [#67741](https://github.com/ClickHouse/ClickHouse/pull/67741) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix REPLACE modifier formatting (forbid omitting brackets). [#67774](https://github.com/ClickHouse/ClickHouse/pull/67774) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not casted to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#68349](https://github.com/ClickHouse/ClickHouse/issues/68349): Reimplement `Dynamic` type. Now when the limit of dynamic data types is reached new types are not cast to String but stored in a special data structure in binary format with binary encoded data type. Now any type ever inserted into `Dynamic` column can be read from it as subcolumn. [#68132](https://github.com/ClickHouse/ClickHouse/pull/68132) ([Kruglov Pavel](https://github.com/Avogar)).
#### New Feature
* Added a new `MergeTree` setting `deduplicate_merge_projection_mode` to control the projections during merges (for specific engines) and `OPTIMIZE DEDUPLICATE` query. Supported options: `throw` (throw an exception in case the projection is not fully supported for *MergeTree engine), `drop` (remove projection during merge if it can't be merged itself consistently) and `rebuild` (rebuild projection from scratch, which is a heavy operation). [#66672](https://github.com/ClickHouse/ClickHouse/pull/66672) ([jsc0218](https://github.com/jsc0218)).
@ -488,6 +578,7 @@
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
* Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on the destination table: data is split into 2 or more blocks and those blocks are considered duplicates when inserted in parallel. - on the MV destination table: equal blocks are deduplicated; this happens when the MV often produces equal data for different input data due to aggregation. - on the MV destination table: equal blocks coming from different MVs are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
#### New Feature
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
@ -599,7 +690,6 @@
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fix incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions. [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).


@ -27,6 +27,7 @@ curl https://clickhouse.com/ | sh
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Bluesky](https://bsky.app/profile/clickhouse.com) and [X](https://x.com/ClickHouseDB) for short news.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
@ -42,16 +43,18 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
Upcoming meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
* [Kuala Lumpur Meetup](https://www.meetup.com/clickhouse-malaysia-meetup-group/events/304576472/) - December 11
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - Feb 3
Recently completed meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1

base/base/BFloat16.h (new file, 313 lines)

@ -0,0 +1,313 @@
#pragma once
#include <bit>
#include <base/types.h>
/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32.
* It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16.
* It is different from the IEEE Float16 (half precision) data type, which has fewer exponent bits and more mantissa bits.
*
* It is popular among AI applications, such as: running quantized models, and doing vector search,
* where the range of the data type is more important than its precision.
*
* It also recently has good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions.
* But it is rarely utilized by compilers.
*
* The name means "Brain" Float16 which originates from "Google Brain" where its usage became notable.
* It is also known under the name "bf16". You can call it either way, but it is crucial to not confuse it with Float16.
* Here is a manual implementation of this data type. Only required operations are implemented.
* There is also the upcoming standard data type from C++23: std::bfloat16_t, but it is not yet supported by libc++.
* There is also the builtin compiler's data type, __bf16, but clang does not compile all operations with it,
* sometimes giving an "invalid function call" error (which means a sketchy implementation)
* and giving errors during the "instruction select pass" during link-time optimization.
*
* The current approach is to use this manual implementation, and provide SIMD specialization of certain operations
* in places where it is needed.
*/
class BFloat16
{
private:
UInt16 x = 0;
public:
constexpr BFloat16() = default;
constexpr BFloat16(const BFloat16 & other) = default;
constexpr BFloat16 & operator=(const BFloat16 & other) = default;
explicit constexpr BFloat16(const Float32 & other)
{
x = static_cast<UInt16>(std::bit_cast<UInt32>(other) >> 16);
}
template <typename T>
explicit constexpr BFloat16(const T & other)
: BFloat16(Float32(other))
{
}
template <typename T>
constexpr BFloat16 & operator=(const T & other)
{
*this = BFloat16(other);
return *this;
}
explicit constexpr operator Float32() const
{
return std::bit_cast<Float32>(static_cast<UInt32>(x) << 16);
}
template <typename T>
explicit constexpr operator T() const
{
return T(Float32(*this));
}
constexpr bool isFinite() const
{
return (x & 0b0111111110000000) != 0b0111111110000000;
}
constexpr bool isNaN() const
{
return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000;
}
constexpr bool signBit() const
{
return x & 0b1000000000000000;
}
constexpr BFloat16 abs() const
{
BFloat16 res;
res.x = x & 0b0111111111111111; /// keep everything except the sign bit
return res;
}
constexpr bool operator==(const BFloat16 & other) const
{
return x == other.x;
}
constexpr bool operator!=(const BFloat16 & other) const
{
return x != other.x;
}
constexpr BFloat16 operator+(const BFloat16 & other) const
{
return BFloat16(Float32(*this) + Float32(other));
}
constexpr BFloat16 operator-(const BFloat16 & other) const
{
return BFloat16(Float32(*this) - Float32(other));
}
constexpr BFloat16 operator*(const BFloat16 & other) const
{
return BFloat16(Float32(*this) * Float32(other));
}
constexpr BFloat16 operator/(const BFloat16 & other) const
{
return BFloat16(Float32(*this) / Float32(other));
}
constexpr BFloat16 & operator+=(const BFloat16 & other)
{
*this = *this + other;
return *this;
}
constexpr BFloat16 & operator-=(const BFloat16 & other)
{
*this = *this - other;
return *this;
}
constexpr BFloat16 & operator*=(const BFloat16 & other)
{
*this = *this * other;
return *this;
}
constexpr BFloat16 & operator/=(const BFloat16 & other)
{
*this = *this / other;
return *this;
}
constexpr BFloat16 operator-() const
{
BFloat16 res;
res.x = x ^ 0b1000000000000000;
return res;
}
};
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const BFloat16 & a, const T & b)
{
return Float32(a) == b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const T & a, const BFloat16 & b)
{
return a == Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const BFloat16 & a, const T & b)
{
return Float32(a) != b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const T & a, const BFloat16 & b)
{
return a != Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const BFloat16 & a, const T & b)
{
return Float32(a) < b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const T & a, const BFloat16 & b)
{
return a < Float32(b);
}
constexpr inline bool operator<(BFloat16 a, BFloat16 b)
{
return Float32(a) < Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const BFloat16 & a, const T & b)
{
return Float32(a) > b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const T & a, const BFloat16 & b)
{
return a > Float32(b);
}
constexpr inline bool operator>(BFloat16 a, BFloat16 b)
{
return Float32(a) > Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const BFloat16 & a, const T & b)
{
return Float32(a) <= b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const T & a, const BFloat16 & b)
{
return a <= Float32(b);
}
constexpr inline bool operator<=(BFloat16 a, BFloat16 b)
{
return Float32(a) <= Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const BFloat16 & a, const T & b)
{
return Float32(a) >= b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const T & a, const BFloat16 & b)
{
return a >= Float32(b);
}
constexpr inline bool operator>=(BFloat16 a, BFloat16 b)
{
return Float32(a) >= Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(T a, BFloat16 b)
{
return a + Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(BFloat16 a, T b)
{
return Float32(a) + b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(T a, BFloat16 b)
{
return a - Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(BFloat16 a, T b)
{
return Float32(a) - b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(T a, BFloat16 b)
{
return a * Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(BFloat16 a, T b)
{
return Float32(a) * b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(T a, BFloat16 b)
{
return a / Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(BFloat16 a, T b)
{
return Float32(a) / b;
}
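The two-byte truncation described in the header comment is easy to check independently. A minimal sketch (my illustration, not part of this commit; assumes a C++20 compiler for std::bit_cast):

#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    float f = 1.5f;  // 0x3FC00000 in IEEE-754 binary32
    // Keep only the most significant two bytes -- this is exactly a BFloat16.
    uint16_t bf = static_cast<uint16_t>(std::bit_cast<uint32_t>(f) >> 16);  // 0x3FC0
    // Widen back by appending sixteen zero bits.
    float back = std::bit_cast<float>(static_cast<uint32_t>(bf) << 16);
    assert(back == 1.5f);  // 1.5 is exactly representable in BFloat16, so the round trip is lossless
}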


@ -10,6 +10,15 @@
template <typename T> struct FloatTraits;
template <>
struct FloatTraits<BFloat16>
{
using UInt = uint16_t;
static constexpr size_t bits = 16;
static constexpr size_t exponent_bits = 8;
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
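/// 16 - 8 - 1 = 7: one sign bit and eight exponent bits leave seven mantissa bits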
};
template <>
struct FloatTraits<float>
{
@ -87,6 +96,15 @@ struct DecomposedFloat
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
}
bool isFinite() const
{
return exponent() != ((1ull << Traits::exponent_bits) - 1);
}
bool isNaN() const
{
return !isFinite() && (mantissa() != 0);
}
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
/// This function is generic, big integers (128, 256 bit) are supported as well.
@ -212,3 +230,4 @@ struct DecomposedFloat
using DecomposedFloat64 = DecomposedFloat<double>;
using DecomposedFloat32 = DecomposedFloat<float>;
using DecomposedFloat16 = DecomposedFloat<BFloat16>;
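The new `isFinite`/`isNaN` predicates encode the usual IEEE-754 rule: an all-ones exponent means Inf (zero mantissa) or NaN (non-zero mantissa). A standalone sketch of the same checks for binary32 (my illustration, not the repository's helpers; assumes C++20):

#include <bit>
#include <cassert>
#include <cstdint>
#include <limits>

bool is_finite_f32(float f)
{
    // The 8-bit exponent occupies bits 23..30; all ones means Inf or NaN.
    return ((std::bit_cast<uint32_t>(f) >> 23) & 0xFF) != 0xFF;
}

bool is_nan_f32(float f)
{
    // Non-finite with a non-zero 23-bit mantissa is NaN rather than Inf.
    return !is_finite_f32(f) && (std::bit_cast<uint32_t>(f) & 0x7FFFFF) != 0;
}

int main()
{
    assert(is_finite_f32(1.0f));
    assert(!is_finite_f32(std::numeric_limits<float>::infinity()));
    assert(is_nan_f32(std::numeric_limits<float>::quiet_NaN()));
    assert(!is_nan_f32(std::numeric_limits<float>::infinity()));
}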


@ -4,7 +4,7 @@
#include <fmt/format.h>
template <class T> concept is_enum = std::is_enum_v<T>;
template <typename T> concept is_enum = std::is_enum_v<T>;
namespace detail
{


@ -9,10 +9,11 @@ namespace DB
{
using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
using TypeListFloat = TypeList<Float32, Float64>;
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListFloat>;
using TypeListNativeFloat = TypeList<Float32, Float64>;
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListNativeFloat>;
using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;
using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;

View File

@ -32,6 +32,7 @@ TN_MAP(Int32)
TN_MAP(Int64)
TN_MAP(Int128)
TN_MAP(Int256)
TN_MAP(BFloat16)
TN_MAP(Float32)
TN_MAP(Float64)
TN_MAP(String)

View File

@ -11,9 +11,9 @@
*
* In contrast to std::bit_cast can cast types of different width.
*
* Note: for signed types of narrower size, the casted result is zero-extended
* Note: for signed types of narrower size, the cast result is zero-extended
* instead of sign-extended as with regular static_cast.
* For example, -1 Int8 (represented as 0xFF) bit_casted to UInt64
* For example, -1 Int8 (represented as 0xFF) bit_cast to UInt64
* gives 255 (represented as 0x00000000000000FF) instead of 0xFFFFFFFFFFFFFFFF
*/
template <typename To, typename From>
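The zero-extension caveat above is easy to demonstrate: copying the raw bytes of a narrow value into a zero-initialized wider buffer (which is effectively what this bit_cast does) loses the sign. A Python sketch:

import struct

raw = struct.pack("<b", -1)        # Int8 -1 is the single byte 0xff
widened = raw + b"\x00" * 7        # memcpy into a zeroed 8-byte buffer (little-endian)
(as_uint64,) = struct.unpack("<Q", widened)
print(hex(as_uint64))              # 0xff == 255, not 0xffffffffffffffff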

View File

@ -145,6 +145,7 @@
#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
/// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of

View File

@ -4,6 +4,8 @@
#include <base/types.h>
#include <base/wide_integer.h>
#include <base/BFloat16.h>
using Int128 = wide::integer<128, signed>;
using UInt128 = wide::integer<128, unsigned>;
@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming)
template <> struct is_signed<Int128> { static constexpr bool value = true; };
template <> struct is_signed<Int256> { static constexpr bool value = true; };
template <> struct is_signed<BFloat16> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_signed_v = is_signed<T>::value;
@ -40,15 +43,13 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
template <class T> concept is_integer =
template <typename T> concept is_integer =
std::is_integral_v<T>
|| std::is_same_v<T, Int128>
|| std::is_same_v<T, UInt128>
|| std::is_same_v<T, Int256>
|| std::is_same_v<T, UInt256>;
template <class T> concept is_floating_point = std::is_floating_point_v<T>;
template <typename T>
struct is_arithmetic // NOLINT(readability-identifier-naming)
{
@ -59,11 +60,16 @@ template <> struct is_arithmetic<Int128> { static constexpr bool value = true; }
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
template <> struct is_arithmetic<BFloat16> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
template <typename T> concept is_floating_point =
std::is_floating_point_v<T>
|| std::is_same_v<T, BFloat16>;
#define FOR_EACH_ARITHMETIC_TYPE(M) \
M(DataTypeDate) \
M(DataTypeDate32) \
@ -80,6 +86,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
M(DataTypeUInt128) \
M(DataTypeInt256) \
M(DataTypeUInt256) \
M(DataTypeBFloat16) \
M(DataTypeFloat32) \
M(DataTypeFloat64)
@ -99,6 +106,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
M(DataTypeUInt128, X) \
M(DataTypeInt256, X) \
M(DataTypeUInt256, X) \
M(DataTypeBFloat16, X) \
M(DataTypeFloat32, X) \
M(DataTypeFloat64, X)

View File

@ -30,8 +30,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <cmath>
#include <cstdint>
#include <cstdio>
double preciseExp10(double x)
{

View File

@ -337,7 +337,7 @@ struct integer<Bits, Signed>::_impl
/** Here we have to use strict comparison.
* The max_int is 2^64 - 1.
* When casted to floating point type, it will be rounded to the closest representable number,
* When cast to a floating point type, it will be rounded to the closest representable number,
* which is 2^64.
* But 2^64 is not representable in uint64_t,
* so the maximum representable number will be strictly less.
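The rounding hazard this comment describes can be checked directly: in IEEE double precision (Python's float), 2^64 - 1 rounds up to exactly 2^64, so a non-strict comparison against the converted maximum would accept a value the integer type cannot hold.

max_u64 = 2**64 - 1
print(float(max_u64) == 2.0**64)   # True: the nearest double is 2^64 itself
print(float(max_u64) > max_u64)    # True under exact int/float comparison
# Hence the conversion must compare strictly against 2^64 rather than
# non-strictly against the rounded maximum integer.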

View File

@ -43,7 +43,7 @@ namespace Net
/// Sets the following default values:
/// - timeout: 60 seconds
/// - keepAlive: true
/// - maxKeepAliveRequests: 0
/// - maxKeepAliveRequests: 100
/// - keepAliveTimeout: 15 seconds
void setServerName(const std::string & serverName);
@ -87,12 +87,12 @@ namespace Net
const Poco::Timespan & getKeepAliveTimeout() const;
/// Returns the connection timeout for HTTP connections.
void setMaxKeepAliveRequests(int maxKeepAliveRequests);
void setMaxKeepAliveRequests(size_t maxKeepAliveRequests);
/// Specifies the maximum number of requests allowed
/// during a persistent connection. 0 means unlimited
/// connections.
int getMaxKeepAliveRequests() const;
size_t getMaxKeepAliveRequests() const;
/// Returns the maximum number of requests allowed
/// during a persistent connection, or 0 if
/// unlimited connections are allowed.
@ -106,7 +106,7 @@ namespace Net
std::string _softwareVersion;
Poco::Timespan _timeout;
bool _keepAlive;
int _maxKeepAliveRequests;
size_t _maxKeepAliveRequests;
Poco::Timespan _keepAliveTimeout;
};
@ -138,7 +138,7 @@ namespace Net
}
inline int HTTPServerParams::getMaxKeepAliveRequests() const
inline size_t HTTPServerParams::getMaxKeepAliveRequests() const
{
return _maxKeepAliveRequests;
}

View File

@ -65,7 +65,7 @@ namespace Net
private:
bool _firstRequest;
Poco::Timespan _keepAliveTimeout;
int _maxKeepAliveRequests;
size_t _maxKeepAliveRequests;
};
@ -74,7 +74,7 @@ namespace Net
//
inline bool HTTPServerSession::canKeepAlive() const
{
return _maxKeepAliveRequests != 0;
return getKeepAlive() && _maxKeepAliveRequests > 0;
}

View File

@ -22,7 +22,7 @@ namespace Net {
HTTPServerParams::HTTPServerParams():
_timeout(60000000),
_keepAlive(true),
_maxKeepAliveRequests(0),
_maxKeepAliveRequests(100),
_keepAliveTimeout(15000000)
{
}
@ -63,7 +63,7 @@ void HTTPServerParams::setKeepAliveTimeout(const Poco::Timespan& timeout)
}
void HTTPServerParams::setMaxKeepAliveRequests(int maxKeepAliveRequests)
void HTTPServerParams::setMaxKeepAliveRequests(size_t maxKeepAliveRequests)
{
poco_assert (maxKeepAliveRequests >= 0);
_maxKeepAliveRequests = maxKeepAliveRequests;

View File

@ -50,14 +50,14 @@ bool HTTPServerSession::hasMoreRequests()
--_maxKeepAliveRequests;
return socket().poll(getTimeout(), Socket::SELECT_READ);
}
else if (_maxKeepAliveRequests != 0 && getKeepAlive())
else if (canKeepAlive())
{
if (_maxKeepAliveRequests > 0)
--_maxKeepAliveRequests;
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
}
else
return false;
else
return false;
}
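Taken together, the changes above give keep-alive accounting these semantics: the limit now defaults to 100 instead of unlimited, the counter is decremented per request, and the connection stops being reusable once keep-alive is off or the budget reaches zero. A rough Python model of that logic (class and method names are invented for illustration; the real code additionally polls the socket and special-cases the first request):

class KeepAliveSession:
    """Hypothetical mirror of HTTPServerSession's new request accounting."""
    def __init__(self, keep_alive: bool = True, max_requests: int = 100):
        self.keep_alive = keep_alive
        self.max_requests = max_requests  # requests still allowed on this connection

    def can_keep_alive(self) -> bool:
        return self.keep_alive and self.max_requests > 0

    def has_more_requests(self) -> bool:
        if not self.can_keep_alive():
            return False
        self.max_requests -= 1
        return True  # the real code also checks buffered/pending socket data here

s = KeepAliveSession()
print(sum(s.has_more_requests() for _ in range(150)))  # 100: the budget caps reuse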

View File

@ -18,7 +18,6 @@
using Poco::Exception;
using Poco::ErrorHandler;
namespace Poco {
@ -31,9 +30,7 @@ TCPServerConnection::TCPServerConnection(const StreamSocket& socket):
}
TCPServerConnection::~TCPServerConnection()
{
}
TCPServerConnection::~TCPServerConnection() = default;
void TCPServerConnection::start()

View File

@ -4,8 +4,9 @@ FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG LLVM_APT_VERSION="1:19.1.4~*"
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=18
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=19
RUN apt-get update \
&& apt-get install \
@ -26,7 +27,7 @@ RUN apt-get update \
&& echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
/etc/apt/sources.list \
&& apt-get update \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION} \
&& apt-get install --yes --no-install-recommends --verbose-versions llvm-${LLVM_VERSION}>=${LLVM_APT_VERSION} \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
@ -72,10 +73,6 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot pick up libraries from the default install path.
# It's a very dirty workaround; better to build the compiler and LLVM ourselves and use them. Details: https://github.com/llvm/llvm-project/issues/95792

RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu
ARG TARGETARCH
ARG SCCACHE_VERSION=v0.7.7
ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1

View File

@ -0,0 +1,14 @@
ARG FROM_TAG=latest
FROM clickhouse/stateless-test:$FROM_TAG
USER root
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
nodejs \
npm \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
USER clickhouse

View File

@ -0,0 +1,117 @@
# docker build -t clickhouse/stateless-test .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
RUN mkdir /etc/clickhouse-server /etc/clickhouse-keeper /etc/clickhouse-client && chmod 777 /etc/clickhouse-* \
&& mkdir -p /var/lib/clickhouse /var/log/clickhouse-server && chmod 777 /var/log/clickhouse-server /var/lib/clickhouse
RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse
# moreutils - provides ts for FT
# expect, bzip2 - required by FT
# bsdmainutils - provides hexdump for FT
# golang version 1.13 on Ubuntu 20 is enough for tests
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
awscli \
brotli \
lz4 \
expect \
moreutils \
bzip2 \
bsdmainutils \
golang \
lsof \
mysql-client=8.0* \
ncdu \
netcat-openbsd \
nodejs \
npm \
odbcinst \
openjdk-11-jre-headless \
openssl \
postgresql-client \
python3 \
python3-pip \
qemu-user-static \
sqlite3 \
sudo \
tree \
unixodbc \
rustc \
cargo \
zstd \
file \
jq \
pv \
zip \
unzip \
p7zip-full \
curl \
wget \
xz-utils \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
ARG PROTOC_VERSION=25.1
RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \
&& unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \
&& rm protoc-${PROTOC_VERSION}-linux-x86_64.zip
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
RUN mkdir -p /tmp/clickhouse-odbc-tmp \
&& cd /tmp/clickhouse-odbc-tmp \
&& curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \
&& mkdir /usr/local/lib64 -p \
&& cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \
&& odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
&& odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
&& sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \
&& rm -rf /tmp/clickhouse-odbc-tmp
ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV NUM_TRIES=1
# Unrelated to the vars in setup_minio.sh, but should match them
# so the local-run scenario uses the same binaries
ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z
ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z
ARG TARGETARCH
# Download Minio-related binaries
RUN arch=${TARGETARCH:-amd64} \
&& curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o /minio \
&& curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o /mc \
&& chmod +x /mc /minio
ENV MINIO_ROOT_USER="clickhouse"
ENV MINIO_ROOT_PASSWORD="clickhouse"
# for minio to work without root
RUN chmod 777 /home
ENV HOME="/home"
ENV TEMP_DIR="/tmp/praktika"
ENV PATH="/wd/tests:/tmp/praktika/input:$PATH"
RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
&& tar -xvf hadoop-3.3.1.tar.gz \
&& rm -rf hadoop-3.3.1.tar.gz \
&& chmod 777 /hadoop-3.3.1
RUN npm install -g azurite@3.30.0 \
&& npm install -g tslib && npm install -g node
USER clickhouse

View File

@ -0,0 +1,6 @@
Jinja2==3.1.3
numpy==1.26.4
requests==2.32.3
pandas==1.5.3
scipy==1.12.0
pyarrow==18.0.0

View File

@ -13,11 +13,30 @@ class JobStages(metaclass=MetaClasses.WithIter):
def parse_args():
parser = argparse.ArgumentParser(description="ClickHouse Build Job")
parser.add_argument("BUILD_TYPE", help="Type: <amd|arm_debug|release_sanitizer>")
parser.add_argument("--param", help="Optional custom job start stage", default=None)
parser.add_argument(
"--build-type",
help="Type: <amd|arm>,<debug|release>,<asan|msan|..>",
)
parser.add_argument(
"--param",
help="Optional user-defined job start stage (for local run)",
default=None,
)
return parser.parse_args()
CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \
-DCMAKE_BUILD_TYPE={BUILD_TYPE} \
-DSANITIZE={SANITIZER} \
-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 \
-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \
{AUX_DEFS} \
-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 \
-DCOMPILER_CACHE={CACHE_TYPE} \
-DENABLE_BUILD_PROFILING=1 {DIR}"""
def main():
args = parse_args()
@ -33,23 +52,41 @@ def main():
stages.pop(0)
stages.insert(0, stage)
cmake_build_type = "Release"
sanitizer = ""
build_type = args.build_type
assert (
build_type
), "build_type must be provided either as input argument or as a parameter of parametrized job in CI"
build_type = build_type.lower()
if "debug" in args.BUILD_TYPE.lower():
CACHE_TYPE = "sccache"
BUILD_TYPE = "RelWithDebInfo"
SANITIZER = ""
AUX_DEFS = " -DENABLE_TESTS=0 "
if "debug" in build_type:
print("Build type set: debug")
cmake_build_type = "Debug"
if "asan" in args.BUILD_TYPE.lower():
BUILD_TYPE = "Debug"
AUX_DEFS = " -DENABLE_TESTS=1 "
elif "release" in build_type:
print("Build type set: release")
AUX_DEFS = (
" -DENABLE_TESTS=0 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 "
)
elif "asan" in build_type:
print("Sanitizer set: address")
sanitizer = "address"
SANITIZER = "address"
else:
assert False
# if Environment.is_local_run():
# build_cache_type = "disabled"
# else:
build_cache_type = "sccache"
cmake_cmd = CMAKE_CMD.format(
BUILD_TYPE=BUILD_TYPE,
CACHE_TYPE=CACHE_TYPE,
SANITIZER=SANITIZER,
AUX_DEFS=AUX_DEFS,
DIR=Utils.cwd(),
)
current_directory = Utils.cwd()
build_dir = f"{Settings.TEMP_DIR}/build"
res = True
@ -69,12 +106,7 @@ def main():
results.append(
Result.create_from_command_execution(
name="Cmake configuration",
command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \
-DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \
-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \
-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} -DENABLE_TESTS=1 \
-DENABLE_BUILD_PROFILING=1 {current_directory}",
command=cmake_cmd,
workdir=build_dir,
with_log=True,
)
@ -95,7 +127,7 @@ def main():
Shell.check(f"ls -l {build_dir}/programs/")
res = results[-1].is_ok()
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
Result.create_from(results=results, stopwatch=stop_watch).complete_job()
if __name__ == "__main__":
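The new --build-type value is matched by substring, so names like amd_debug or arm_release compose an architecture with a build flavor. A condensed sketch of the mapping implemented above (the sanitizer branch is omitted for brevity, and the function name is invented):

def cmake_flags(build_type: str) -> dict:
    build_type = build_type.lower()
    if "debug" in build_type:
        return {"BUILD_TYPE": "Debug", "AUX_DEFS": "-DENABLE_TESTS=1"}
    if "release" in build_type:
        return {
            "BUILD_TYPE": "RelWithDebInfo",
            "AUX_DEFS": "-DENABLE_TESTS=0 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1",
        }
    raise ValueError(f"unsupported build type: {build_type}")

print(cmake_flags("amd_debug"))    # Debug build, tests enabled
print(cmake_flags("arm_release"))  # RelWithDebInfo, split debug symbols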

View File

@ -379,4 +379,4 @@ if __name__ == "__main__":
)
)
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
Result.create_from(results=results, stopwatch=stop_watch).complete_job()

View File

@ -1,120 +1,13 @@
import argparse
import threading
from pathlib import Path
from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import MetaClasses, Shell, Utils
from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor
class ClickHouseProc:
def __init__(self):
self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
self.config_file = f"{self.ch_config_dir}/config.xml"
self.user_files_path = f"{self.ch_config_dir}/user_files"
self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
self.proc = None
self.pid = 0
nproc = int(Utils.cpu_count() / 2)
self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
| tee -a \"{self.test_output_file}\""
# TODO: store info in case of failure
self.info = ""
self.info_file = ""
Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
def start(self):
print("Starting ClickHouse server")
Shell.check(f"rm {self.pid_file}")
def run_clickhouse():
self.proc = Shell.run_async(
self.command, verbose=True, suppress_output=True
)
thread = threading.Thread(target=run_clickhouse)
thread.daemon = True # Allow program to exit even if thread is still running
thread.start()
# self.proc = Shell.run_async(self.command, verbose=True)
started = False
try:
for _ in range(5):
pid = Shell.get_output(f"cat {self.pid_file}").strip()
if not pid:
Utils.sleep(1)
continue
started = True
print(f"Got pid from fs [{pid}]")
_ = int(pid)
break
except Exception:
pass
if not started:
stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
return False
print(f"ClickHouse server started successfully, pid [{pid}]")
return True
def wait_ready(self):
res, out, err = 0, "", ""
attempts = 30
delay = 2
for attempt in range(attempts):
res, out, err = Shell.get_res_stdout_stderr(
'clickhouse-client --query "select 1"', verbose=True
)
if out.strip() == "1":
print("Server ready")
break
else:
print(f"Server not ready, wait")
Utils.sleep(delay)
else:
Utils.print_formatted_error(
f"Server not ready after [{attempts*delay}s]", out, err
)
return False
return True
def run_fast_test(self):
if Path(self.test_output_file).exists():
Path(self.test_output_file).unlink()
exit_code = Shell.run(self.fast_test_command)
return exit_code == 0
def terminate(self):
print("Terminate ClickHouse process")
timeout = 10
if self.proc:
Utils.terminate_process_group(self.proc.pid)
self.proc.terminate()
try:
self.proc.wait(timeout=10)
print(f"Process {self.proc.pid} terminated gracefully.")
except Exception:
print(
f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
)
Utils.terminate_process_group(self.proc.pid, force=True)
self.proc.wait() # Wait for the process to be fully killed
print(f"Process {self.proc} was killed.")
def clone_submodules():
submodules_to_update = [
"contrib/sysroot",
@ -240,7 +133,7 @@ def main():
Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}")
results.append(
Result.create_from_command_execution(
name="Checkout Submodules for Minimal Build",
name="Checkout Submodules",
command=clone_submodules,
)
)
@ -295,8 +188,8 @@ def main():
if res and JobStages.CONFIG in stages:
commands = [
f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client",
f"cp ./programs/server/config.xml ./programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --fast-test",
# f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/",
f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml",
update_path_ch_config,
@ -310,7 +203,7 @@ def main():
)
res = results[-1].is_ok()
CH = ClickHouseProc()
CH = ClickHouseProc(fast_test=True)
if res and JobStages.TEST in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Start ClickHouse Server"
@ -322,15 +215,17 @@ def main():
)
if res and JobStages.TEST in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Tests"
print(step_name)
res = res and CH.run_fast_test()
if res:
results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
results[-1].set_timing(stopwatch=stop_watch_)
CH.terminate()
Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly()
Result.create_from(results=results, stopwatch=stop_watch).complete_job()
if __name__ == "__main__":

View File

@ -0,0 +1,171 @@
import argparse
import os
import time
from pathlib import Path
from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import MetaClasses, Shell, Utils
from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor
class JobStages(metaclass=MetaClasses.WithIter):
INSTALL_CLICKHOUSE = "install"
START = "start"
TEST = "test"
def parse_args():
parser = argparse.ArgumentParser(description="ClickHouse Build Job")
parser.add_argument(
"--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}"
)
parser.add_argument(
"--test-options",
help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..",
default="",
)
parser.add_argument("--param", help="Optional job start stage", default=None)
parser.add_argument("--test", help="Optional test name pattern", default="")
return parser.parse_args()
def run_test(
no_parallel: bool, no_sequential: bool, batch_num: int, batch_total: int, test=""
):
test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
test_command = f"clickhouse-test --jobs 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless \
--hung-check --print-time \
--capture-client-stacktrace --queries ./tests/queries -- '{test}' \
| ts '%Y-%m-%d %H:%M:%S' | tee -a \"{test_output_file}\""
if Path(test_output_file).exists():
Path(test_output_file).unlink()
Shell.run(test_command, verbose=True)
def main():
args = parse_args()
test_options = args.test_options.split(",")
no_parallel = "non-parallel" in test_options
no_sequential = "parallel" in test_options
batch_num, total_batches = 0, 0
for to in test_options:
if "/" in to:
batch_num, total_batches = map(int, to.split("/"))
# os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
# f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
# verbose=True,
# strict=True
# )
ch_path = args.ch_path
assert Path(
ch_path + "/clickhouse"
).is_file(), f"clickhouse binary not found under [{ch_path}]"
stop_watch = Utils.Stopwatch()
stages = list(JobStages)
logs_to_attach = []
stage = args.param or JobStages.INSTALL_CLICKHOUSE
if stage:
assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
print(f"Job will start from stage [{stage}]")
while stage in stages:
stages.pop(0)
stages.insert(0, stage)
res = True
results = []
Utils.add_to_PATH(f"{ch_path}:tests")
if res and JobStages.INSTALL_CLICKHOUSE in stages:
commands = [
f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*",
f"chmod +x {ch_path}/clickhouse",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
# TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled
f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage --no-azure",
# clickhouse benchmark segfaults with --config-path, so provide client config by its default location
f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
# update_path_ch_config,
# f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml",
# f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml",
f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|<path>local_disk|<path>{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done",
f"clickhouse-server --version",
]
results.append(
Result.create_from_command_execution(
name="Install ClickHouse", command=commands, with_log=True
)
)
res = results[-1].is_ok()
CH = ClickHouseProc()
if res and JobStages.START in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Start ClickHouse Server"
print(step_name)
minio_log = "/tmp/praktika/output/minio.log"
res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
logs_to_attach += [minio_log]
time.sleep(10)
Shell.check("ps -ef | grep minio", verbose=True)
res = res and Shell.check(
"aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True
)
res = res and CH.start()
res = res and CH.wait_ready()
if res:
print("ch started")
logs_to_attach += [
"/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log",
"/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log",
]
results.append(
Result.create_from(
name=step_name,
status=res,
stopwatch=stop_watch_,
)
)
res = results[-1].is_ok()
if res and JobStages.TEST in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Tests"
print(step_name)
# assert Shell.check("clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"", verbose=True)
run_test(
no_parallel=no_parallel,
no_sequential=no_sequential,
batch_num=batch_num,
batch_total=total_batches,
test=args.test,
)
results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
results[-1].set_timing(stopwatch=stop_watch_)
res = results[-1].is_ok()
Result.create_from(
results=results, stopwatch=stop_watch, files=logs_to_attach if not res else []
).complete_job()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,183 @@
import argparse
import os
import time
from pathlib import Path
from praktika.result import Result
from praktika.settings import Settings
from praktika.utils import MetaClasses, Shell, Utils
from ci.jobs.scripts.clickhouse_proc import ClickHouseProc
from ci.jobs.scripts.functional_tests_results import FTResultsProcessor
class JobStages(metaclass=MetaClasses.WithIter):
INSTALL_CLICKHOUSE = "install"
START = "start"
TEST = "test"
def parse_args():
parser = argparse.ArgumentParser(description="ClickHouse Build Job")
parser.add_argument(
"--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}"
)
parser.add_argument(
"--test-options",
help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..",
default="",
)
parser.add_argument("--param", help="Optional job start stage", default=None)
parser.add_argument("--test", help="Optional test name pattern", default="")
return parser.parse_args()
def run_stateless_test(
no_parallel: bool, no_sequential: bool, batch_num: int, batch_total: int, test=""
):
assert not (no_parallel and no_sequential)
test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
aux = ""
nproc = int(Utils.cpu_count() / 2)
if batch_num and batch_total:
aux = f"--run-by-hash-total {batch_total} --run-by-hash-num {batch_num-1}"
stateless_test_command = f"clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
--no-drop-if-fail --capture-client-stacktrace --test-runs 1 \
{'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequential else ''} \
--jobs {nproc} --report-coverage --report-logs-stats {aux} \
--queries ./tests/queries -- '{test}' | ts '%Y-%m-%d %H:%M:%S' \
| tee -a \"{test_output_file}\""
if Path(test_output_file).exists():
Path(test_output_file).unlink()
Shell.run(stateless_test_command, verbose=True)
def main():
args = parse_args()
test_options = args.test_options.split(",")
no_parallel = "non-parallel" in test_options
no_sequential = "parallel" in test_options
batch_num, total_batches = 0, 0
for to in test_options:
if "/" in to:
batch_num, total_batches = map(int, to.split("/"))
# os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
# f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
# verbose=True,
# strict=True
# )
ch_path = args.ch_path
assert Path(
ch_path + "/clickhouse"
).is_file(), f"clickhouse binary not found under [{ch_path}]"
stop_watch = Utils.Stopwatch()
stages = list(JobStages)
logs_to_attach = []
stage = args.param or JobStages.INSTALL_CLICKHOUSE
if stage:
assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
print(f"Job will start from stage [{stage}]")
while stage in stages:
stages.pop(0)
stages.insert(0, stage)
res = True
results = []
Utils.add_to_PATH(f"{ch_path}:tests")
if res and JobStages.INSTALL_CLICKHOUSE in stages:
commands = [
f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*",
f"chmod +x {ch_path}/clickhouse",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
# TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled
f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage --no-azure",
# clickhouse benchmark segfaults with --config-path, so provide client config by its default location
f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
# update_path_ch_config,
# f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml",
# f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml",
f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|<path>local_disk|<path>{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done",
f"clickhouse-server --version",
]
results.append(
Result.create_from_command_execution(
name="Install ClickHouse", command=commands, with_log=True
)
)
res = results[-1].is_ok()
CH = ClickHouseProc()
if res and JobStages.START in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Start ClickHouse Server"
print(step_name)
hdfs_log = "/tmp/praktika/output/hdfs_mini.log"
minio_log = "/tmp/praktika/output/minio.log"
res = res and CH.start_hdfs(log_file_path=hdfs_log)
res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
logs_to_attach += [minio_log, hdfs_log]
time.sleep(10)
Shell.check("ps -ef | grep minio", verbose=True)
Shell.check("ps -ef | grep hdfs", verbose=True)
res = res and Shell.check(
"aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True
)
res = res and CH.start()
res = res and CH.wait_ready()
if res:
print("ch started")
logs_to_attach += [
"/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log",
"/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log",
]
results.append(
Result.create_from(
name=step_name,
status=res,
stopwatch=stop_watch_,
)
)
res = results[-1].is_ok()
if res and JobStages.TEST in stages:
stop_watch_ = Utils.Stopwatch()
step_name = "Tests"
print(step_name)
assert Shell.check(
"clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"",
verbose=True,
)
run_stateless_test(
no_parallel=no_parallel,
no_sequential=no_sequential,
batch_num=batch_num,
batch_total=total_batches,
test=args.test,
)
results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
results[-1].set_timing(stopwatch=stop_watch_)
res = results[-1].is_ok()
Result.create_from(
results=results, stopwatch=stop_watch, files=logs_to_attach if not res else []
).complete_job()
if __name__ == "__main__":
main()
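The BATCH_NUM/BATCH_TOT test option above shards the test set across runners through clickhouse-test's --run-by-hash flags; note the shift from the option's 1-based numbering to the tool's 0-based batch index. A small sketch of the parsing (function name invented):

def batch_flags(test_options: str) -> str:
    batch_num, total_batches = 0, 0
    for opt in test_options.split(","):
        if "/" in opt:
            batch_num, total_batches = map(int, opt.split("/"))
    if batch_num and total_batches:
        # clickhouse-test numbers batches from 0, the option from 1
        return f"--run-by-hash-total {total_batches} --run-by-hash-num {batch_num - 1}"
    return ""

print(batch_flags("parallel,2/3"))   # --run-by-hash-total 3 --run-by-hash-num 1
print(batch_flags("non-parallel"))   # empty: no batching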

View File


@ -1316,7 +1316,6 @@ bools
boringssl
boundingRatio
bozerkins
broadcasted
brotli
bson
bsoneachrow
@ -1342,7 +1341,6 @@ cardinalities
cardinality
cartesian
cassandra
casted
catboost
catboostEvaluate
categoricalInformationValue
@ -3131,3 +3129,4 @@ DistributedCachePoolBehaviourOnLimit
SharedJoin
ShareSet
unacked
BFloat

View File

@ -0,0 +1,142 @@
import subprocess
from pathlib import Path
from praktika.settings import Settings
from praktika.utils import Shell, Utils
class ClickHouseProc:
BACKUPS_XML = """
<clickhouse>
<backups>
<type>local</type>
<path>{CH_RUNTIME_DIR}/var/lib/clickhouse/disks/backups/</path>
</backups>
</clickhouse>
"""
def __init__(self, fast_test=False):
self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server"
self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid"
self.config_file = f"{self.ch_config_dir}/config.xml"
self.user_files_path = f"{self.ch_config_dir}/user_files"
self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination"
self.proc = None
self.pid = 0
nproc = int(Utils.cpu_count() / 2)
self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \
| tee -a \"{self.test_output_file}\""
# TODO: store info in case of failure
self.info = ""
self.info_file = ""
Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
# Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
# if not fast_test:
# with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file:
# file.write(self.BACKUPS_XML)
self.minio_proc = None
def start_hdfs(self, log_file_path):
command = ["./ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh"]
with open(log_file_path, "w") as log_file:
process = subprocess.Popen(
command, stdout=log_file, stderr=subprocess.STDOUT
)
print(
f"Started setup_hdfs_minicluster.sh asynchronously with PID {process.pid}"
)
return True
def start_minio(self, test_type, log_file_path):
command = [
"./ci/jobs/scripts/functional_tests/setup_minio.sh",
test_type,
"./tests",
]
with open(log_file_path, "w") as log_file:
process = subprocess.Popen(
command, stdout=log_file, stderr=subprocess.STDOUT
)
print(f"Started setup_minio.sh asynchronously with PID {process.pid}")
return True
def start(self):
print("Starting ClickHouse server")
Shell.check(f"rm {self.pid_file}")
self.proc = subprocess.Popen(self.command, stderr=subprocess.STDOUT, shell=True)
started = False
try:
for _ in range(5):
pid = Shell.get_output(f"cat {self.pid_file}").strip()
if not pid:
Utils.sleep(1)
continue
started = True
print(f"Got pid from fs [{pid}]")
_ = int(pid)
break
except Exception:
pass
if not started:
stdout = self.proc.stdout.read().strip() if self.proc.stdout else ""
stderr = self.proc.stderr.read().strip() if self.proc.stderr else ""
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr)
return False
print(f"ClickHouse server started successfully, pid [{pid}]")
return True
def wait_ready(self):
res, out, err = 0, "", ""
attempts = 30
delay = 2
for attempt in range(attempts):
res, out, err = Shell.get_res_stdout_stderr(
'clickhouse-client --query "select 1"', verbose=True
)
if out.strip() == "1":
print("Server ready")
break
else:
print(f"Server not ready, wait")
Utils.sleep(delay)
else:
Utils.print_formatted_error(
f"Server not ready after [{attempts*delay}s]", out, err
)
return False
return True
def run_fast_test(self):
if Path(self.test_output_file).exists():
Path(self.test_output_file).unlink()
exit_code = Shell.run(self.fast_test_command)
return exit_code == 0
def terminate(self):
print("Terminate ClickHouse process")
timeout = 10
if self.proc:
Utils.terminate_process_group(self.proc.pid)
self.proc.terminate()
try:
self.proc.wait(timeout=10)
print(f"Process {self.proc.pid} terminated gracefully.")
except Exception:
print(
f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..."
)
Utils.terminate_process_group(self.proc.pid, force=True)
self.proc.wait() # Wait for the process to be fully killed
print(f"Process {self.proc} was killed.")
if self.minio_proc:
Utils.terminate_process_group(self.minio_proc.pid)
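For context, the test jobs in this commit drive the helper above roughly like this (a sketch, not runnable standalone: it assumes the praktika environment, a clickhouse binary on PATH, and the paths used elsewhere in this commit):

ch = ClickHouseProc(fast_test=False)
ok = ch.start_minio(test_type="stateful", log_file_path="/tmp/praktika/output/minio.log")
ok = ok and ch.start()        # launches clickhouse-server and reads the pid file
ok = ok and ch.wait_ready()   # polls `clickhouse-client --query "select 1"` up to 60s
try:
    ...                       # run the test stage here
finally:
    ch.terminate()            # also stops the minio process group if one was started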

View File

@ -0,0 +1,19 @@
#!/bin/bash
# shellcheck disable=SC2024
set -e -x -a -u
ls -lha
cd /hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data
bin/mapred minicluster -format -nomr -nnport 12222 &
while ! nc -z localhost 12222; do
sleep 1
done
lsof -i :12222

View File

@ -0,0 +1,162 @@
#!/bin/bash
set -euxf -o pipefail
export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
TEST_DIR=${2:-/repo/tests/}
if [ -d "$TEMP_DIR" ]; then
TEST_DIR=$(readlink -f $TEST_DIR)
cd "$TEMP_DIR"
# add / for minio mc in docker
PATH="/:.:$PATH"
fi
usage() {
echo $"Usage: $0 <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
exit 1
}
check_arg() {
local query_dir
if [ ! $# -eq 1 ]; then
if [ ! $# -eq 2 ]; then
echo "ERROR: need either one or two arguments, <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
usage
fi
fi
case "$1" in
stateless)
query_dir="0_stateless"
;;
stateful)
query_dir="1_stateful"
;;
*)
echo "unknown test type ${test_type}"
usage
;;
esac
echo ${query_dir}
}
find_arch() {
local arch
case $(uname -m) in
x86_64)
arch="amd64"
;;
aarch64)
arch="arm64"
;;
*)
echo "unknown architecture $(uname -m)";
exit 1
;;
esac
echo ${arch}
}
find_os() {
local os
os=$(uname -s | tr '[:upper:]' '[:lower:]')
echo "${os}"
}
download_minio() {
local os
local arch
local minio_server_version=${MINIO_SERVER_VERSION:-2024-08-03T04-33-23Z}
local minio_client_version=${MINIO_CLIENT_VERSION:-2024-07-31T15-58-33Z}
os=$(find_os)
arch=$(find_arch)
wget "https://dl.min.io/server/minio/release/${os}-${arch}/archive/minio.RELEASE.${minio_server_version}" -O ./minio
wget "https://dl.min.io/client/mc/release/${os}-${arch}/archive/mc.RELEASE.${minio_client_version}" -O ./mc
chmod +x ./mc ./minio
}
start_minio() {
pwd
mkdir -p ./minio_data
minio --version
nohup minio server --address ":11111" ./minio_data &
wait_for_it
lsof -i :11111
sleep 5
}
setup_minio() {
local test_type=$1
echo "setup_minio(), test_type=$test_type"
mc alias set clickminio http://localhost:11111 clickhouse clickhouse
mc admin user add clickminio test testtest
mc admin policy attach clickminio readwrite --user=test ||:
mc mb --ignore-existing clickminio/test
if [ "$test_type" = "stateless" ]; then
echo "Create @test bucket in minio"
mc anonymous set public clickminio/test
fi
}
# uploads data to minio, by default after unpacking all tests
# will be in /usr/share/clickhouse-test/queries
upload_data() {
local query_dir=$1
local test_path=$2
local data_path=${test_path}/queries/${query_dir}/data_minio
echo "upload_data() data_path=$data_path"
# iterating over globs will cause redundant file variable to be
# a path to a file, not a filename
# shellcheck disable=SC2045
if [ -d "${data_path}" ]; then
mc cp --recursive "${data_path}"/ clickminio/test/
fi
}
setup_aws_credentials() {
local minio_root_user=${MINIO_ROOT_USER:-clickhouse}
local minio_root_password=${MINIO_ROOT_PASSWORD:-clickhouse}
mkdir -p ~/.aws
cat <<EOT >> ~/.aws/credentials
[default]
aws_access_key_id=${minio_root_user}
aws_secret_access_key=${minio_root_password}
EOT
}
wait_for_it() {
local counter=0
local max_counter=60
local url="http://localhost:11111"
local params=(
--silent
--verbose
)
while ! curl "${params[@]}" "${url}" 2>&1 | grep AccessDenied
do
if [[ ${counter} == "${max_counter}" ]]; then
echo "failed to setup minio"
exit 0
fi
echo "trying to connect to minio"
sleep 1
counter=$((counter + 1))
done
}
main() {
local query_dir
query_dir=$(check_arg "$@")
if ! (minio --version && mc --version); then
download_minio
fi
start_minio
setup_minio "$1"
upload_data "${query_dir}" "$TEST_DIR"
setup_aws_credentials
}
main "$@"

View File

@ -1,7 +1,6 @@
import dataclasses
from typing import List
from praktika.environment import Environment
from praktika.result import Result
OK_SIGN = "[ OK "
@ -233,6 +232,8 @@ class FTResultsProcessor:
else:
pass
info = f"Total: {s.total - s.skipped}, Failed: {s.failed}"
# TODO: !!!
# def test_result_comparator(item):
# # sort by status then by check name
@ -250,10 +251,11 @@ class FTResultsProcessor:
# test_results.sort(key=test_result_comparator)
return Result.create_from(
name=Environment.JOB_NAME,
name="Tests",
results=test_results,
status=state,
files=[self.tests_output_file],
info=info,
with_info_from_results=False,
)

View File

@ -37,6 +37,30 @@ def create_parser():
type=str,
default=None,
)
run_parser.add_argument(
"--test",
help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test",
type=str,
default="",
)
run_parser.add_argument(
"--pr",
help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR",
type=int,
default=None,
)
run_parser.add_argument(
"--sha",
help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that sha, head sha will be used if not set",
type=str,
default=None,
)
run_parser.add_argument(
"--branch",
help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that branch, main branch name will be used if not set",
type=str,
default=None,
)
run_parser.add_argument(
"--ci",
help="When not set - dummy env will be generated, for local test",
@ -85,9 +109,13 @@ if __name__ == "__main__":
workflow=workflow,
job=job,
docker=args.docker,
dummy_env=not args.ci,
local_run=not args.ci,
no_docker=args.no_docker,
param=args.param,
test=args.test,
pr=args.pr,
branch=args.branch,
sha=args.sha,
)
else:
parser.print_help()

View File

@ -6,7 +6,7 @@ from types import SimpleNamespace
from typing import Any, Dict, List, Type
from praktika import Workflow
from praktika._settings import _Settings
from praktika.settings import Settings
from praktika.utils import MetaClasses, T
@ -30,13 +30,12 @@ class _Environment(MetaClasses.Serializable):
INSTANCE_ID: str
INSTANCE_LIFE_CYCLE: str
LOCAL_RUN: bool = False
PARAMETER: Any = None
REPORT_INFO: List[str] = dataclasses.field(default_factory=list)
name = "environment"
@classmethod
def file_name_static(cls, _name=""):
return f"{_Settings.TEMP_DIR}/{cls.name}.json"
return f"{Settings.TEMP_DIR}/{cls.name}.json"
@classmethod
def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T:
@ -67,12 +66,12 @@ class _Environment(MetaClasses.Serializable):
@staticmethod
def get_needs_statuses():
if Path(_Settings.WORKFLOW_STATUS_FILE).is_file():
with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
if Path(Settings.WORKFLOW_STATUS_FILE).is_file():
with open(Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f:
return json.load(f)
else:
print(
f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist"
f"ERROR: Status file [{Settings.WORKFLOW_STATUS_FILE}] does not exist"
)
raise RuntimeError()
@ -159,7 +158,8 @@ class _Environment(MetaClasses.Serializable):
@classmethod
def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False):
prefix = ""
if pr_number > 0:
assert sha or latest
if pr_number and pr_number > 0:
prefix += f"{pr_number}"
else:
prefix += f"{branch}"
@ -171,18 +171,15 @@ class _Environment(MetaClasses.Serializable):
# TODO: find a better place for the function. This file should not import praktika.settings
# as it's requires reading users config, that's why imports nested inside the function
def get_report_url(self):
def get_report_url(self, settings, latest=False):
import urllib
from praktika.settings import Settings
from praktika.utils import Utils
path = Settings.HTML_S3_PATH
for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
path = settings.HTML_S3_PATH
for bucket, endpoint in settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
if bucket in path:
path = path.replace(bucket, endpoint)
break
REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={'latest' if latest else self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}"
return REPORT_URL
def is_local_run(self):

View File

@ -1,124 +0,0 @@
import dataclasses
from pathlib import Path
from typing import Dict, Iterable, List, Optional
@dataclasses.dataclass
class _Settings:
######################################
# Pipeline generation settings #
######################################
CI_PATH = "./ci"
WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
CI_CONFIG_JOB_NAME = "Config Workflow"
DOCKER_BUILD_JOB_NAME = "Docker Builds"
FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
CI_CONFIG_RUNS_ON: Optional[List[str]] = None
DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
VALIDATE_FILE_PATHS: bool = True
######################################
# Runtime Settings #
######################################
MAX_RETRIES_S3 = 3
MAX_RETRIES_GH = 3
######################################
# S3 (artifact storage) settings #
######################################
S3_ARTIFACT_PATH: str = ""
######################################
# CI workspace settings #
######################################
TEMP_DIR: str = "/tmp/praktika"
OUTPUT_DIR: str = f"{TEMP_DIR}/output"
INPUT_DIR: str = f"{TEMP_DIR}/input"
PYTHON_INTERPRETER: str = "python3"
PYTHON_PACKET_MANAGER: str = "pip3"
PYTHON_VERSION: str = "3.9"
INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"
SECRET_GH_APP_ID: str = "GH_APP_ID"
SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"
ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"
######################################
# CI Cache settings #
######################################
CACHE_VERSION: int = 1
CACHE_DIGEST_LEN: int = 20
CACHE_S3_PATH: str = ""
CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"
######################################
# Report settings #
######################################
HTML_S3_PATH: str = ""
HTML_PAGE_FILE: str = "./praktika/json.html"
TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None
DOCKERHUB_USERNAME: str = ""
DOCKERHUB_SECRET: str = ""
DOCKER_WD: str = "/wd"
######################################
# CI DB Settings #
######################################
SECRET_CI_DB_URL: str = "CI_DB_URL"
SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
CI_DB_DB_NAME = ""
CI_DB_TABLE_NAME = ""
CI_DB_INSERT_TIMEOUT_SEC = 5
_USER_DEFINED_SETTINGS = [
"S3_ARTIFACT_PATH",
"CACHE_S3_PATH",
"HTML_S3_PATH",
"S3_BUCKET_TO_HTTP_ENDPOINT",
"TEXT_CONTENT_EXTENSIONS",
"TEMP_DIR",
"OUTPUT_DIR",
"INPUT_DIR",
"CI_CONFIG_RUNS_ON",
"DOCKER_BUILD_RUNS_ON",
"CI_CONFIG_JOB_NAME",
"PYTHON_INTERPRETER",
"PYTHON_VERSION",
"PYTHON_PACKET_MANAGER",
"INSTALL_PYTHON_FOR_NATIVE_JOBS",
"INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
"MAX_RETRIES_S3",
"MAX_RETRIES_GH",
"VALIDATE_FILE_PATHS",
"DOCKERHUB_USERNAME",
"DOCKERHUB_SECRET",
"READY_FOR_MERGE_STATUS_NAME",
"SECRET_CI_DB_URL",
"SECRET_CI_DB_PASSWORD",
"CI_DB_DB_NAME",
"CI_DB_TABLE_NAME",
"CI_DB_INSERT_TIMEOUT_SEC",
"SECRET_GH_APP_PEM_KEY",
"SECRET_GH_APP_ID",
]
class GHRunners:
ubuntu = "ubuntu-latest"
if __name__ == "__main__":
for setting in _USER_DEFINED_SETTINGS:
print(_Settings().__getattribute__(setting))
# print(dataclasses.asdict(_Settings()))

View File

@ -52,7 +52,7 @@ class CIDB:
check_status=result.status,
check_duration_ms=int(result.duration * 1000),
check_start_time=Utils.timestamp_to_str(result.start_time),
report_url=env.get_report_url(),
report_url=env.get_report_url(settings=Settings),
pull_request_url=env.CHANGE_URL,
base_ref=env.BASE_BRANCH,
base_repo=env.REPOSITORY,

View File

@ -23,7 +23,7 @@ class Digest:
hash_string = hash_obj.hexdigest()
return hash_string
def calc_job_digest(self, job_config: Job.Config):
def calc_job_digest(self, job_config: Job.Config, docker_digests):
config = job_config.digest_config
if not config:
return "f" * Settings.CACHE_DIGEST_LEN
@ -31,32 +31,32 @@ class Digest:
cache_key = self._hash_digest_config(config)
if cache_key in self.digest_cache:
return self.digest_cache[cache_key]
included_files = Utils.traverse_paths(
job_config.digest_config.include_paths,
job_config.digest_config.exclude_paths,
sorted=True,
)
print(
f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files"
)
# Sort files to ensure consistent hash calculation
included_files.sort()
# Calculate MD5 hash
res = ""
if not included_files:
res = "f" * Settings.CACHE_DIGEST_LEN
print(f"NOTE: empty digest config [{config}] - return dummy digest")
print(
f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache"
)
digest = self.digest_cache[cache_key]
else:
included_files = Utils.traverse_paths(
job_config.digest_config.include_paths,
job_config.digest_config.exclude_paths,
sorted=True,
)
print(
f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files"
)
hash_md5 = hashlib.md5()
for file_path in included_files:
res = self._calc_file_digest(file_path, hash_md5)
assert res
self.digest_cache[cache_key] = res
return res
for i, file_path in enumerate(included_files):
hash_md5 = self._calc_file_digest(file_path, hash_md5)
digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
self.digest_cache[cache_key] = digest
if job_config.run_in_docker:
# respect docker digest in the job digest
docker_digest = docker_digests[job_config.run_in_docker.split("+")[0]]
digest = "-".join([docker_digest, digest])
return digest
def calc_docker_digest(
self,
@ -103,10 +103,10 @@ class Digest:
print(
f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation"
)
return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
return hash_md5
with open(resolved_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN]
return hash_md5
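The reworked digest above threads one MD5 object through every included file, truncates the hex digest, and, for jobs that run in docker, prefixes the image digest so a changed image invalidates the cache. A condensed sketch of that flow (names assumed):

import hashlib

CACHE_DIGEST_LEN = 20  # as in Settings.CACHE_DIGEST_LEN

def job_digest(included_files, docker_digest: str = "") -> str:
    if not included_files:
        return "f" * CACHE_DIGEST_LEN  # dummy digest for an empty digest config
    md5 = hashlib.md5()
    for path in sorted(included_files):  # sorted for a stable hash
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                md5.update(chunk)
    digest = md5.hexdigest()[:CACHE_DIGEST_LEN]
    # jobs that run in docker mix the image digest into their own
    return f"{docker_digest}-{digest}" if docker_digest else digest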

View File

@ -1,3 +0,0 @@
from praktika._environment import _Environment
Environment = _Environment.get()

View File

@ -18,7 +18,7 @@ class GH:
ret_code, out, err = Shell.get_res_stdout_stderr(command, verbose=True)
res = ret_code == 0
if not res and "Validation Failed" in err:
print("ERROR: GH command validation error")
print(f"ERROR: GH command validation error.")
break
if not res and "Bad credentials" in err:
print("ERROR: GH credentials/auth failure")

View File

@ -1,6 +1,5 @@
from praktika._environment import _Environment
from praktika.cache import Cache
from praktika.mangle import _get_workflows
from praktika.runtime import RunConfig
from praktika.settings import Settings
from praktika.utils import Utils
@ -8,11 +7,10 @@ from praktika.utils import Utils
class CacheRunnerHooks:
@classmethod
def configure(cls, _workflow):
workflow_config = RunConfig.from_fs(_workflow.name)
def configure(cls, workflow):
workflow_config = RunConfig.from_fs(workflow.name)
docker_digests = workflow_config.digest_dockers
cache = Cache()
assert _Environment.get().WORKFLOW_NAME
workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0]
print(f"Workflow Configure, workflow [{workflow.name}]")
assert (
workflow.enable_cache
@ -20,11 +18,13 @@ class CacheRunnerHooks:
artifact_digest_map = {}
job_digest_map = {}
for job in workflow.jobs:
digest = cache.digest.calc_job_digest(
job_config=job, docker_digests=docker_digests
)
if not job.digest_config:
print(
f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run"
)
digest = cache.digest.calc_job_digest(job_config=job)
job_digest_map[job.name] = digest
if job.provides:
# assign the job digest also to the artifacts it provides
@ -50,7 +50,6 @@ class CacheRunnerHooks:
), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]"
print("Check remote cache")
job_to_cache_record = {}
for job_name, job_digest in workflow_config.digest_jobs.items():
record = cache.fetch_success(job_name=job_name, job_digest=job_digest)
if record:
@ -60,7 +59,7 @@ class CacheRunnerHooks:
)
workflow_config.cache_success.append(job_name)
workflow_config.cache_success_base64.append(Utils.to_base64(job_name))
job_to_cache_record[job_name] = record
workflow_config.cache_jobs[job_name] = record
print("Check artifacts to reuse")
for job in workflow.jobs:
@ -68,7 +67,7 @@ class CacheRunnerHooks:
if job.provides:
for artifact_name in job.provides:
workflow_config.cache_artifacts[artifact_name] = (
job_to_cache_record[job.name]
workflow_config.cache_jobs[job.name]
)
print(f"Write config to GH's job output")

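The hook above wires three maps together: a digest per job, a cache record for every job whose digest already has a successful run, and the same record for every artifact that job provides. A toy sketch of that wiring, with fetch_success stubbed as a callable and plain dicts in place of RunConfig (names here are illustrative only):

def configure_cache(jobs, fetch_success):
    cache_jobs = {}       # job name -> cache record (job can be skipped)
    cache_artifacts = {}  # artifact name -> record of the job providing it
    for job_name, (digest, provides) in jobs.items():
        record = fetch_success(job_name, digest)
        if record:
            cache_jobs[job_name] = record
            for artifact_name in provides:
                cache_artifacts[artifact_name] = record
    return cache_jobs, cache_artifacts

# usage: "Build" hits the cache, so it is skipped and its artifact is reused
jobs = {"Build": ("abc123", ["binary"]), "Test": ("def456", [])}
print(configure_cache(
    jobs, fetch_success=lambda name, digest: {"sha": "deadbeef"} if name == "Build" else None
))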
View File

@ -1,63 +1,125 @@
import dataclasses
import json
import urllib.parse
from pathlib import Path
from typing import List
from praktika._environment import _Environment
from praktika.gh import GH
from praktika.parser import WorkflowConfigParser
from praktika.result import Result, ResultInfo
from praktika.result import Result, ResultInfo, _ResultS3
from praktika.runtime import RunConfig
from praktika.s3 import S3
from praktika.settings import Settings
from praktika.utils import Shell, Utils
from praktika.utils import Utils
@dataclasses.dataclass
class GitCommit:
date: str
message: str
# date: str
# message: str
sha: str
@staticmethod
def from_json(json_data: str) -> List["GitCommit"]:
def from_json(file) -> List["GitCommit"]:
commits = []
json_data = None
try:
data = json.loads(json_data)
with open(file, "r", encoding="utf-8") as f:
json_data = json.load(f)
commits = [
GitCommit(
message=commit["messageHeadline"],
sha=commit["oid"],
date=commit["committedDate"],
# message=commit["messageHeadline"],
sha=commit["sha"],
# date=commit["committedDate"],
)
for commit in data.get("commits", [])
for commit in json_data
]
except Exception as e:
print(
f"ERROR: Failed to deserialize commit's data: [{json_data}], ex: [{e}]"
f"ERROR: Failed to deserialize commit's data [{json_data}], ex: [{e}]"
)
return commits
@classmethod
def update_s3_data(cls):
env = _Environment.get()
sha = env.SHA
if not sha:
print("WARNING: Failed to retrieve commit sha")
return
commits = cls.pull_from_s3()
for commit in commits:
if sha == commit.sha:
print(
f"INFO: Sha already present in commits data [{sha}] - skip data update"
)
return
commits.append(GitCommit(sha=sha))
cls.push_to_s3(commits)
return
@classmethod
def dump(cls, commits):
commits_ = []
for commit in commits:
commits_.append(dataclasses.asdict(commit))
with open(cls.file_name(), "w", encoding="utf8") as f:
json.dump(commits_, f)
@classmethod
def pull_from_s3(cls):
local_path = Path(cls.file_name())
file_name = local_path.name
env = _Environment.get()
s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}"
if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
print(f"WARNING: failed to cp file [{s3_path}] from s3")
return []
return cls.from_json(local_path)
@classmethod
def push_to_s3(cls, commits):
print(f"INFO: push commits data to s3, commits num [{len(commits)}]")
cls.dump(commits)
local_path = Path(cls.file_name())
file_name = local_path.name
env = _Environment.get()
s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}"
if not S3.copy_file_to_s3(s3_path=s3_path, local_path=local_path, text=True):
print(f"WARNING: failed to cp file [{local_path}] to s3")
@classmethod
def get_s3_prefix(cls, pr_number, branch):
prefix = ""
assert pr_number or branch
if pr_number and pr_number > 0:
prefix += f"{pr_number}"
else:
prefix += f"{branch}"
return prefix
@classmethod
def file_name(cls):
return f"{Settings.TEMP_DIR}/commits.json"
# def _get_pr_commits(pr_number):
# res = []
# if not pr_number:
# return res
# output = Shell.get_output(f"gh pr view {pr_number} --json commits")
# if output:
# res = GitCommit.from_json(output)
# return res
class HtmlRunnerHooks:
@classmethod
def configure(cls, _workflow):
def _get_pr_commits(pr_number):
res = []
if not pr_number:
return res
output = Shell.get_output(f"gh pr view {pr_number} --json commits")
if output:
res = GitCommit.from_json(output)
return res
# generate pending Results for all jobs in the workflow
if _workflow.enable_cache:
skip_jobs = RunConfig.from_fs(_workflow.name).cache_success
job_cache_records = RunConfig.from_fs(_workflow.name).cache_jobs
else:
skip_jobs = []
@ -67,36 +129,22 @@ class HtmlRunnerHooks:
if job.name not in skip_jobs:
result = Result.generate_pending(job.name)
else:
result = Result.generate_skipped(job.name)
result = Result.generate_skipped(job.name, job_cache_records[job.name])
results.append(result)
summary_result = Result.generate_pending(_workflow.name, results=results)
summary_result.aux_links.append(env.CHANGE_URL)
summary_result.aux_links.append(env.RUN_URL)
summary_result.links.append(env.CHANGE_URL)
summary_result.links.append(env.RUN_URL)
summary_result.start_time = Utils.timestamp()
page_url = "/".join(
["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)]
)
for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items():
page_url = page_url.replace(bucket, endpoint)
# TODO: add support for non-PRs (use branch?)
page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}"
summary_result.html_link = page_url
# clean the previous latest results in PR if any
if env.PR_NUMBER:
S3.clean_latest_result()
S3.copy_result_to_s3(
summary_result,
unlock=False,
)
assert _ResultS3.copy_result_to_s3_with_version(summary_result, version=0)
page_url = env.get_report_url(settings=Settings)
print(f"CI Status page url [{page_url}]")
res1 = GH.post_commit_status(
name=_workflow.name,
status=Result.Status.PENDING,
description="",
url=page_url,
url=env.get_report_url(settings=Settings, latest=True),
)
res2 = GH.post_pr_comment(
comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]",
@ -106,23 +154,15 @@ class HtmlRunnerHooks:
Utils.raise_with_error(
"Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed"
)
if env.PR_NUMBER:
commits = _get_pr_commits(env.PR_NUMBER)
# TODO: upload commits data to s3 to visualise it on a report page
print(commits)
# TODO: enable for branch, add commit number limiting
GitCommit.update_s3_data()
@classmethod
def pre_run(cls, _workflow, _job):
result = Result.from_fs(_job.name)
S3.copy_result_from_s3(
Result.file_name_static(_workflow.name),
)
workflow_result = Result.from_fs(_workflow.name)
workflow_result.update_sub_result(result)
S3.copy_result_to_s3(
workflow_result,
unlock=True,
_ResultS3.update_workflow_results(
workflow_name=_workflow.name, new_sub_results=result
)
@classmethod
@ -132,14 +172,13 @@ class HtmlRunnerHooks:
@classmethod
def post_run(cls, _workflow, _job, info_errors):
result = Result.from_fs(_job.name)
env = _Environment.get()
S3.copy_result_from_s3(
Result.file_name_static(_workflow.name),
lock=True,
)
workflow_result = Result.from_fs(_workflow.name)
print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]")
_ResultS3.upload_result_files_to_s3(result)
_ResultS3.copy_result_to_s3(result)
env = _Environment.get()
new_sub_results = [result]
new_result_info = ""
env_info = env.REPORT_INFO
if env_info:
print(
@ -151,14 +190,8 @@ class HtmlRunnerHooks:
info_str = f"{_job.name}:\n"
info_str += "\n".join(info_errors)
print("Update workflow results with new info")
workflow_result.set_info(info_str)
new_result_info = info_str
old_status = workflow_result.status
S3.upload_result_files_to_s3(result)
workflow_result.update_sub_result(result)
skipped_job_results = []
if not result.is_ok():
print(
"Current job failed - find dependee jobs in the workflow and set their statuses to skipped"
@ -171,7 +204,7 @@ class HtmlRunnerHooks:
print(
f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure"
)
skipped_job_results.append(
new_sub_results.append(
Result(
name=dependee_job.name,
status=Result.Status.SKIPPED,
@ -179,20 +212,18 @@ class HtmlRunnerHooks:
+ f" [{_job.name}]",
)
)
for skipped_job_result in skipped_job_results:
workflow_result.update_sub_result(skipped_job_result)
S3.copy_result_to_s3(
workflow_result,
unlock=True,
updated_status = _ResultS3.update_workflow_results(
new_info=new_result_info,
new_sub_results=new_sub_results,
workflow_name=_workflow.name,
)
if workflow_result.status != old_status:
print(
f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]"
)
if updated_status:
print(f"Update GH commit status [{result.name}]: [{updated_status}]")
GH.post_commit_status(
name=workflow_result.name,
status=GH.convert_to_gh_status(workflow_result.status),
name=_workflow.name,
status=GH.convert_to_gh_status(updated_status),
description="",
url=workflow_result.html_link,
url=env.get_report_url(settings=Settings, latest=True),
)

View File

@ -52,30 +52,58 @@ class Job:
self,
parameter: Optional[List[Any]] = None,
runs_on: Optional[List[List[str]]] = None,
provides: Optional[List[List[str]]] = None,
requires: Optional[List[List[str]]] = None,
timeout: Optional[List[int]] = None,
):
assert (
parameter or runs_on
), "Either :parameter or :runs_on must be non empty list for parametrisation"
if runs_on:
assert isinstance(runs_on, list) and isinstance(runs_on[0], list)
if not parameter:
parameter = [None] * len(runs_on)
if not runs_on:
runs_on = [None] * len(parameter)
if not timeout:
timeout = [None] * len(parameter)
if not provides:
provides = [None] * len(parameter)
if not requires:
requires = [None] * len(parameter)
assert (
len(parameter) == len(runs_on) == len(timeout)
), "Parametrization lists must be of the same size"
len(parameter)
== len(runs_on)
== len(timeout)
== len(provides)
== len(requires)
), f"Parametrization lists must be of the same size [{len(parameter)}, {len(runs_on)}, {len(timeout)}, {len(provides)}, {len(requires)}]"
res = []
for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout):
for parameter_, runs_on_, timeout_, provides_, requires_ in zip(
parameter, runs_on, timeout, provides, requires
):
obj = copy.deepcopy(self)
assert (
not obj.provides
), "Job.Config.provides must be empty for parametrized jobs"
if parameter_:
obj.parameter = parameter_
obj.command = obj.command.format(PARAMETER=parameter_)
if runs_on_:
obj.runs_on = runs_on_
if timeout_:
obj.timeout = timeout_
if provides_:
assert (
not obj.provides
), "Job.Config.provides must be empty for parametrized jobs"
obj.provides = provides_
if requires_:
assert (
not obj.requires
), "Job.Config.requires and parametrize(requires=...) are both set"
obj.requires = requires_
obj.name = obj.get_job_name_with_parameter()
res.append(obj)
return res
@ -84,13 +112,16 @@ class Job:
name, parameter, runs_on = self.name, self.parameter, self.runs_on
res = name
name_params = []
if isinstance(parameter, list) or isinstance(parameter, dict):
name_params.append(json.dumps(parameter))
elif parameter is not None:
name_params.append(parameter)
if runs_on:
if parameter:
if isinstance(parameter, list) or isinstance(parameter, dict):
name_params.append(json.dumps(parameter))
else:
name_params.append(parameter)
elif runs_on:
assert isinstance(runs_on, list)
name_params.append(json.dumps(runs_on))
else:
assert False
if name_params:
name_params = [str(param) for param in name_params]
res += f" ({', '.join(name_params)})"

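parametrize() above broadcasts up to five parallel lists, padding any missing axis with None so that one job is produced per index. Roughly, as a standalone sketch:

def broadcast(parameter=None, runs_on=None, timeout=None, provides=None, requires=None):
    assert parameter or runs_on, "need at least one non-empty axis"
    n = len(parameter or runs_on)
    pad = lambda axis: axis if axis else [None] * n
    axes = (pad(a) for a in (parameter, runs_on, timeout, provides, requires))
    return list(zip(*axes))  # one tuple of settings per generated job

# two jobs varying only in parameter; the other axes default to None
print(broadcast(parameter=["amd", "arm"]))
# [('amd', None, None, None, None), ('arm', None, None, None, None)]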
View File

@ -89,15 +89,27 @@
letter-spacing: -0.5px;
}
.dropdown-value {
width: 100px;
font-weight: normal;
font-family: inherit;
background-color: transparent;
color: inherit;
/*border: none;*/
/*outline: none;*/
/*cursor: pointer;*/
}
#result-container {
background-color: var(--tile-background);
margin-left: calc(var(--status-width) + 20px);
padding: 20px;
padding: 0;
box-sizing: border-box;
text-align: center;
font-size: 18px;
font-weight: normal;
flex-grow: 1;
margin-bottom: 40px;
}
#footer {
@ -189,10 +201,7 @@
}
th.name-column, td.name-column {
max-width: 400px; /* Set the maximum width for the column */
white-space: nowrap; /* Prevent text from wrapping */
overflow: hidden; /* Hide the overflowed text */
text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */
min-width: 350px;
}
th.status-column, td.status-column {
@ -282,6 +291,12 @@
}
}
function updateUrlParameter(paramName, paramValue) {
const url = new URL(window.location.href);
url.searchParams.set(paramName, paramValue);
window.location.href = url.toString();
}
// Attach the toggle function to the click event of the icon
document.getElementById('theme-toggle').addEventListener('click', toggleTheme);
@ -291,14 +306,14 @@
const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
const month = monthNames[date.getMonth()];
const year = date.getFullYear();
//const year = date.getFullYear();
const hours = String(date.getHours()).padStart(2, '0');
const minutes = String(date.getMinutes()).padStart(2, '0');
const seconds = String(date.getSeconds()).padStart(2, '0');
//const milliseconds = String(date.getMilliseconds()).padStart(2, '0');
return showDate
? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}`
? `${day}'${month} ${hours}:${minutes}:${seconds}`
: `${hours}:${minutes}:${seconds}`;
}
@ -328,7 +343,7 @@
const milliseconds = Math.floor((duration % 1) * 1000);
const formattedSeconds = String(seconds);
const formattedMilliseconds = String(milliseconds).padStart(3, '0');
const formattedMilliseconds = String(milliseconds).padStart(2, '0').slice(-2);
return `${formattedSeconds}.${formattedMilliseconds}`;
}
@ -346,8 +361,7 @@
return 'status-other';
}
function addKeyValueToStatus(key, value) {
function addKeyValueToStatus(key, value, options = null) {
const statusContainer = document.getElementById('status-container');
let keyValuePair = document.createElement('div');
@ -357,12 +371,40 @@
keyElement.className = 'json-key';
keyElement.textContent = key + ':';
const valueElement = document.createElement('div');
valueElement.className = 'json-value';
valueElement.textContent = value;
let valueElement;
keyValuePair.appendChild(keyElement)
keyValuePair.appendChild(valueElement)
if (options) {
// Create dropdown if options are provided
valueElement = document.createElement('select');
valueElement.className = 'dropdown-value';
options.forEach(optionValue => {
const option = document.createElement('option');
option.value = optionValue;
option.textContent = optionValue.slice(0, 10);
// Set the initially selected option
if (optionValue === value) {
option.selected = true;
}
valueElement.appendChild(option);
});
// Update the URL parameter when the selected value changes
valueElement.addEventListener('change', (event) => {
const selectedValue = event.target.value;
updateUrlParameter(key, selectedValue);
});
} else {
// Create a simple text display if no options are provided
valueElement = document.createElement('div');
valueElement.className = 'json-value';
valueElement.textContent = value || 'N/A'; // Display 'N/A' if value is null
}
keyValuePair.appendChild(keyElement);
keyValuePair.appendChild(valueElement);
statusContainer.appendChild(keyValuePair);
}
@ -486,12 +528,12 @@
const columns = ['name', 'status', 'start_time', 'duration', 'info'];
const columnSymbols = {
name: '📂',
status: '✔️',
name: '🗂️',
status: '🧾',
start_time: '🕒',
duration: '⏳',
info: '',
files: '📄'
info: '📝',
files: '📎'
};
function createResultsTable(results, nest_level) {
@ -500,16 +542,14 @@
const thead = document.createElement('thead');
const tbody = document.createElement('tbody');
// Get the current URL parameters
const currentUrl = new URL(window.location.href);
// Create table headers based on the fixed columns
const headerRow = document.createElement('tr');
columns.forEach(column => {
const th = document.createElement('th');
th.textContent = th.textContent = columnSymbols[column] || column;
th.textContent = columnSymbols[column] || column;
th.style.cursor = 'pointer'; // Make headers clickable
th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table
th.setAttribute('data-sort-direction', 'asc'); // Default sort direction
th.addEventListener('click', () => sortTable(results, column, columnSymbols[column] || column, tbody, nest_level, columns)); // Add click event to sort the table
headerRow.appendChild(th);
});
thead.appendChild(headerRow);
@ -561,8 +601,7 @@
td.classList.add('time-column');
td.textContent = value ? formatDuration(value) : '';
} else if (column === 'info') {
// For info and other columns, just display the value
td.textContent = value || '';
td.textContent = value ? (value.includes('\n') ? '↵' : value) : '';
td.classList.add('info-column');
}
@ -573,39 +612,33 @@
});
}
function sortTable(results, key, tbody, nest_level) {
function sortTable(results, column, key, tbody, nest_level, columns) {
// Find the table header element for the given key
let th = null;
const tableHeaders = document.querySelectorAll('th'); // Select all table headers
tableHeaders.forEach(header => {
if (header.textContent.trim().toLowerCase() === key.toLowerCase()) {
th = header;
}
});
const tableHeaders = document.querySelectorAll('th');
let th = Array.from(tableHeaders).find(header => header.textContent === key);
if (!th) {
console.error(`No table header found for key: ${key}`);
return;
}
// Determine the current sort direction
let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true;
const ascending = th.getAttribute('data-sort-direction') === 'asc';
th.setAttribute('data-sort-direction', ascending ? 'desc' : 'asc');
// Toggle the sort direction for the next click
th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc');
// Sort the results array by the given key
results.sort((a, b) => {
if (a[key] < b[key]) return ascending ? -1 : 1;
if (a[key] > b[key]) return ascending ? 1 : -1;
if (a[column] < b[column]) return ascending ? -1 : 1;
if (a[column] > b[column]) return ascending ? 1 : -1;
return 0;
});
// Clear the existing rows in tbody
tbody.innerHTML = '';
// Re-populate the table with sorted data
populateTableRows(tbody, results, columns, nest_level);
}
function loadJSON(PR, sha, nameParams) {
function loadResultsJSON(PR, sha, nameParams) {
const infoElement = document.getElementById('info-container');
let lastModifiedTime = null;
const task = nameParams[0].toLowerCase();
@ -630,19 +663,20 @@
let targetData = navigatePath(data, nameParams);
let nest_level = nameParams.length;
if (targetData) {
infoElement.style.display = 'none';
// Add footer links from top-level Result
if (Array.isArray(data.links) && data.links.length > 0) {
data.links.forEach(link => {
const a = document.createElement('a');
a.href = link;
a.textContent = link.split('/').pop();
a.target = '_blank';
footerRight.appendChild(a);
});
}
// Handle footer links if present
if (Array.isArray(data.aux_links) && data.aux_links.length > 0) {
data.aux_links.forEach(link => {
const a = document.createElement('a');
a.href = link;
a.textContent = link.split('/').pop();
a.target = '_blank';
footerRight.appendChild(a);
});
}
if (targetData) {
//infoElement.style.display = 'none';
infoElement.innerHTML = (targetData.info || '').replace(/\n/g, '<br>');
addStatusToStatus(targetData.status, targetData.start_time, targetData.duration)
@ -721,22 +755,62 @@
}
});
if (PR) {
addKeyValueToStatus("PR", PR)
} else {
console.error("TODO")
}
addKeyValueToStatus("sha", sha);
if (nameParams[1]) {
addKeyValueToStatus("job", nameParams[1]);
}
addKeyValueToStatus("workflow", nameParams[0]);
let path_commits_json = '';
let commitsArray = [];
if (PR && sha && root_name) {
loadJSON(PR, sha, nameParams);
if (PR) {
addKeyValueToStatus("PR", PR);
const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', '');
path_commits_json = `${baseUrl}/${encodeURIComponent(PR)}/commits.json`;
} else {
document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0';
// Placeholder for a different path when PR is missing
console.error("PR parameter is missing. Setting alternate commits path.");
path_commits_json = '/path/to/alternative/commits.json';
}
function loadCommitsArray(path) {
return fetch(path, { cache: "no-cache" })
.then(response => {
if (!response.ok) {
console.error(`HTTP error! status: ${response.status}`)
return [];
}
return response.json();
})
.then(data => {
if (Array.isArray(data) && data.every(item => typeof item === 'object' && item.hasOwnProperty('sha'))) {
return data.map(item => item.sha);
} else {
throw new Error('Invalid data format: expected array of objects with a "sha" key');
}
})
.catch(error => {
console.error('Error loading commits JSON:', error);
return []; // Return an empty array if an error occurs
});
}
loadCommitsArray(path_commits_json)
.then(data => {
commitsArray = data;
})
.finally(() => {
// Proceed with the rest of the initialization
addKeyValueToStatus("sha", sha || "latest", commitsArray.concat(["latest"]));
if (nameParams[1]) {
addKeyValueToStatus("job", nameParams[1]);
}
addKeyValueToStatus("workflow", nameParams[0]);
// Check if all required parameters are present to load JSON
if (PR && sha && root_name) {
const shaToLoad = (sha === 'latest') ? commitsArray[commitsArray.length - 1] : sha;
loadResultsJSON(PR, shaToLoad, nameParams);
} else {
document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0';
}
});
}
window.onload = init;

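The sha handling above amounts to two small operations: resolving 'latest' against the commits.json order (oldest to newest) and rewriting one query parameter. The page does this in JavaScript; a Python sketch of the same logic, for reference:

from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def resolve_sha(sha, commit_shas):
    # commits.json is ordered oldest -> newest, so "latest" is the last entry
    return commit_shas[-1] if sha == "latest" and commit_shas else sha

def update_url_parameter(url, name, value):
    parts = urlparse(url)
    query = dict(parse_qsl(parts.query))
    query[name] = value
    return urlunparse(parts._replace(query=urlencode(query)))

print(resolve_sha("latest", ["aaa", "bbb"]))  # bbb
print(update_url_parameter("https://example.com/json.html?PR=1&sha=latest", "sha", "bbb"))
# https://example.com/json.html?PR=1&sha=bbb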
View File

@ -1,11 +1,10 @@
import copy
import importlib.util
from pathlib import Path
from typing import Any, Dict
from praktika import Job
from praktika._settings import _USER_DEFINED_SETTINGS, _Settings
from praktika.utils import ContextManager, Utils
from praktika.settings import Settings
from praktika.utils import Utils
def _get_workflows(name=None, file=None):
@ -14,35 +13,34 @@ def _get_workflows(name=None, file=None):
"""
res = []
with ContextManager.cd():
directory = Path(_Settings.WORKFLOWS_DIRECTORY)
for py_file in directory.glob("*.py"):
if file and file not in str(py_file):
continue
module_name = py_file.name.removeprefix(".py")
spec = importlib.util.spec_from_file_location(
module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}"
)
assert spec
foo = importlib.util.module_from_spec(spec)
assert spec.loader
spec.loader.exec_module(foo)
try:
for workflow in foo.WORKFLOWS:
if name:
if name == workflow.name:
print(f"Read workflow [{name}] config from [{module_name}]")
res = [workflow]
break
else:
continue
directory = Path(Settings.WORKFLOWS_DIRECTORY)
for py_file in directory.glob("*.py"):
if file and file not in str(py_file):
continue
module_name = py_file.name.removesuffix(".py")
spec = importlib.util.spec_from_file_location(
module_name, f"{Settings.WORKFLOWS_DIRECTORY}/{py_file.name}"
)
assert spec
foo = importlib.util.module_from_spec(spec)
assert spec.loader
spec.loader.exec_module(foo)
try:
for workflow in foo.WORKFLOWS:
if name:
if name == workflow.name:
print(f"Read workflow [{name}] config from [{module_name}]")
res = [workflow]
break
else:
res += foo.WORKFLOWS
print(f"Read workflow configs from [{module_name}]")
except Exception as e:
print(
f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]"
)
continue
else:
res += foo.WORKFLOWS
print(f"Read workflow configs from [{module_name}]")
except Exception as e:
print(
f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]"
)
if not res:
Utils.raise_with_error(f"Failed to find workflow [{name or file}]")
@ -58,7 +56,6 @@ def _update_workflow_artifacts(workflow):
artifact_job = {}
for job in workflow.jobs:
for artifact_name in job.provides:
assert artifact_name not in artifact_job
artifact_job[artifact_name] = job.name
for artifact in workflow.artifacts:
artifact._provided_by = artifact_job[artifact.name]
@ -108,30 +105,3 @@ def _update_workflow_with_native_jobs(workflow):
for job in workflow.jobs:
aux_job.requires.append(job.name)
workflow.jobs.append(aux_job)
def _get_user_settings() -> Dict[str, Any]:
"""
Gets user's settings
"""
res = {} # type: Dict[str, Any]
directory = Path(_Settings.SETTINGS_DIRECTORY)
for py_file in directory.glob("*.py"):
module_name = py_file.name.removeprefix(".py")
spec = importlib.util.spec_from_file_location(
module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}"
)
assert spec
foo = importlib.util.module_from_spec(spec)
assert spec.loader
spec.loader.exec_module(foo)
for setting in _USER_DEFINED_SETTINGS:
try:
value = getattr(foo, setting)
res[setting] = value
print(f"Apply user defined setting [{setting} = {value}]")
except Exception as e:
pass
return res

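_get_workflows loads every module in WORKFLOWS_DIRECTORY by path and collects its WORKFLOWS attribute. A minimal sketch of that discovery pattern; note that Path.stem already strips the suffix, which is what the removesuffix fix above is after:

import importlib.util
from pathlib import Path

def load_attr_from_dir(directory, attr):
    found = []
    for py_file in Path(directory).glob("*.py"):
        # py_file.stem is the file name without ".py"
        spec = importlib.util.spec_from_file_location(py_file.stem, py_file)
        assert spec and spec.loader
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        found.extend(getattr(module, attr, []))
    return found

# workflows = load_attr_from_dir("./ci/workflows", "WORKFLOWS")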
View File

@ -10,9 +10,8 @@ from praktika.gh import GH
from praktika.hook_cache import CacheRunnerHooks
from praktika.hook_html import HtmlRunnerHooks
from praktika.mangle import _get_workflows
from praktika.result import Result, ResultInfo
from praktika.result import Result, ResultInfo, _ResultS3
from praktika.runtime import RunConfig
from praktika.s3 import S3
from praktika.settings import Settings
from praktika.utils import Shell, Utils
@ -151,7 +150,7 @@ def _config_workflow(workflow: Workflow.Config, job_name):
status = Result.Status.ERROR
print("ERROR: ", info)
else:
Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate")
assert Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika yaml")
exit_code, output, err = Shell.get_res_stdout_stderr(
f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}"
)
@ -225,6 +224,7 @@ def _config_workflow(workflow: Workflow.Config, job_name):
cache_success=[],
cache_success_base64=[],
cache_artifacts={},
cache_jobs={},
).dump()
# checks:
@ -250,6 +250,9 @@ def _config_workflow(workflow: Workflow.Config, job_name):
info_lines.append(job_name + ": " + info)
results.append(result_)
if workflow.enable_merge_commit:
assert False, "NOT implemented"
# config:
if workflow.dockers:
print("Calculate docker's digests")
@ -307,9 +310,8 @@ def _finish_workflow(workflow, job_name):
print(env.get_needs_statuses())
print("Check Workflow results")
S3.copy_result_from_s3(
_ResultS3.copy_result_from_s3(
Result.file_name_static(workflow.name),
lock=False,
)
workflow_result = Result.from_fs(workflow.name)
@ -339,10 +341,12 @@ def _finish_workflow(workflow, job_name):
f"NOTE: Result for [{result.name}] has not ok status [{result.status}]"
)
ready_for_merge_status = Result.Status.FAILED
failed_results.append(result.name.split("(", maxsplit=1)[0]) # cut name
failed_results.append(result.name)
if failed_results:
ready_for_merge_description = f"failed: {', '.join(failed_results)}"
ready_for_merge_description = (
f'Failed {len(failed_results)} "Required for Merge" jobs'
)
if not GH.post_commit_status(
name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]",
@ -354,14 +358,11 @@ def _finish_workflow(workflow, job_name):
env.add_info(ResultInfo.GH_STATUS_ERROR)
if update_final_report:
S3.copy_result_to_s3(
_ResultS3.copy_result_to_s3(
workflow_result,
unlock=False,
) # no lock - no unlock
)
Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info(
ready_for_merge_description
)
Result.from_fs(job_name).set_status(Result.Status.SUCCESS)
if __name__ == "__main__":

View File

@ -1,12 +1,13 @@
import dataclasses
import datetime
import sys
from collections.abc import Container
from pathlib import Path
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, Union
from praktika._environment import _Environment
from praktika._settings import _Settings
from praktika.cache import Cache
from praktika.s3 import S3
from praktika.settings import Settings
from praktika.utils import ContextManager, MetaClasses, Shell, Utils
@ -27,10 +28,6 @@ class Result(MetaClasses.Serializable):
files (List[str]): A list of file paths or names related to the result.
links (List[str]): A list of URLs related to the result (e.g., links to reports or resources).
info (str): Additional information about the result. Free-form text.
# TODO: rename
aux_links (List[str]): A list of auxiliary links that provide additional context for the result.
# TODO: remove
html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page).
Inner Class:
Status: Defines possible statuses for the task, such as "success", "failure", etc.
@ -52,8 +49,6 @@ class Result(MetaClasses.Serializable):
files: List[str] = dataclasses.field(default_factory=list)
links: List[str] = dataclasses.field(default_factory=list)
info: str = ""
aux_links: List[str] = dataclasses.field(default_factory=list)
html_link: str = ""
@staticmethod
def create_from(
@ -62,14 +57,15 @@ class Result(MetaClasses.Serializable):
stopwatch: Utils.Stopwatch = None,
status="",
files=None,
info="",
info: Union[List[str], str] = "",
with_info_from_results=True,
):
if isinstance(status, bool):
status = Result.Status.SUCCESS if status else Result.Status.FAILED
if not results and not status:
print("ERROR: Either .results or .status must be provided")
raise
Utils.raise_with_error(
f"Either .results ({results}) or .status ({status}) must be provided"
)
if not name:
name = _Environment.get().JOB_NAME
if not name:
@ -78,10 +74,10 @@ class Result(MetaClasses.Serializable):
result_status = status or Result.Status.SUCCESS
infos = []
if info:
if isinstance(info, Container):
infos += info
if isinstance(info, str):
infos += [info]
else:
infos.append(info)
infos += info
if results and not status:
for result in results:
if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED):
@ -112,7 +108,7 @@ class Result(MetaClasses.Serializable):
return self.status not in (Result.Status.PENDING, Result.Status.RUNNING)
def is_running(self):
return self.status not in (Result.Status.RUNNING,)
return self.status in (Result.Status.RUNNING,)
def is_ok(self):
return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS)
@ -155,7 +151,7 @@ class Result(MetaClasses.Serializable):
@classmethod
def file_name_static(cls, name):
return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json"
return f"{Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json"
@classmethod
def from_dict(cls, obj: Dict[str, Any]) -> "Result":
@ -180,6 +176,11 @@ class Result(MetaClasses.Serializable):
)
return self
def set_timing(self, stopwatch: Utils.Stopwatch):
self.start_time = stopwatch.start_time
self.duration = stopwatch.duration
return self
def update_sub_result(self, result: "Result"):
assert self.results, "BUG?"
for i, result_ in enumerate(self.results):
@ -233,7 +234,7 @@ class Result(MetaClasses.Serializable):
)
@classmethod
def generate_skipped(cls, name, results=None):
def generate_skipped(cls, name, cache_record: Cache.CacheRecord, results=None):
return Result(
name=name,
status=Result.Status.SKIPPED,
@ -242,7 +243,7 @@ class Result(MetaClasses.Serializable):
results=results or [],
files=[],
links=[],
info="from cache",
info=f"from cache: sha [{cache_record.sha}], pr/branch [{cache_record.pr_number or cache_record.branch}]",
)
@classmethod
@ -276,7 +277,7 @@ class Result(MetaClasses.Serializable):
# Set log file path if logging is enabled
log_file = (
f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log"
f"{Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log"
if with_log
else None
)
@ -318,18 +319,35 @@ class Result(MetaClasses.Serializable):
files=[log_file] if log_file else None,
)
def finish_job_accordingly(self):
def complete_job(self):
self.dump()
if not self.is_ok():
print("ERROR: Job Failed")
for result in self.results:
if not result.is_ok():
print("Failed checks:")
print(" | ", result)
print(self.to_stdout_formatted())
sys.exit(1)
else:
print("ok")
def to_stdout_formatted(self, indent="", res=""):
if self.is_ok():
return res
res += f"{indent}Task [{self.name}] failed.\n"
fail_info = ""
sub_indent = indent + " "
if not self.results:
if not self.is_ok():
fail_info += f"{sub_indent}{self.name}:\n"
for line in self.info.splitlines():
fail_info += f"{sub_indent}{sub_indent}{line}\n"
return res + fail_info
for sub_result in self.results:
res = sub_result.to_stdout_formatted(sub_indent, res)
return res
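to_stdout_formatted walks the result tree and prints only the failed branches, indenting by depth and dumping leaf info at double indent. A self-contained illustration with a stripped-down Result carrying just the fields the traversal touches:

import dataclasses
from typing import List

@dataclasses.dataclass
class MiniResult:
    name: str
    ok: bool
    info: str = ""
    results: List["MiniResult"] = dataclasses.field(default_factory=list)

    def to_stdout_formatted(self, indent="", res=""):
        if self.ok:
            return res
        res += f"{indent}Task [{self.name}] failed.\n"
        sub_indent = indent + "  "
        if not self.results:  # leaf: dump its info lines, double-indented
            for line in self.info.splitlines():
                res += f"{sub_indent}{sub_indent}{line}\n"
            return res
        for sub in self.results:  # node: recurse into failed children only
            res = sub.to_stdout_formatted(sub_indent, res)
        return res

leaf = MiniResult("unit tests", ok=False, info="3 tests failed")
print(MiniResult("CI", ok=False, results=[leaf]).to_stdout_formatted())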
class ResultInfo:
SETUP_ENV_JOB_FAILED = (
@ -352,3 +370,202 @@ class ResultInfo:
)
S3_ERROR = "S3 call failure"
class _ResultS3:
@classmethod
def copy_result_to_s3(cls, result, unlock=False):
result.dump()
env = _Environment.get()
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
s3_path_full = f"{s3_path}/{Path(result.file_name()).name}"
url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
# if unlock:
# if not cls.unlock(s3_path_full):
# print(f"ERROR: File [{s3_path_full}] unlock failure")
# assert False # TODO: investigate
return url
@classmethod
def copy_result_from_s3(cls, local_path, lock=False):
env = _Environment.get()
file_name = Path(local_path).name
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}"
# if lock:
# cls.lock(s3_path)
if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
print(f"ERROR: failed to cp file [{s3_path}] from s3")
raise RuntimeError()
@classmethod
def copy_result_from_s3_with_version(cls, local_path):
env = _Environment.get()
file_name = Path(local_path).name
local_dir = Path(local_path).parent
file_name_pattern = f"{file_name}_*"
for file_path in local_dir.glob(file_name_pattern):
file_path.unlink()
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/"
if not S3.copy_file_from_s3_matching_pattern(
s3_path=s3_path, local_path=local_dir, include=file_name_pattern
):
print(f"ERROR: failed to cp file [{s3_path}] from s3")
raise RuntimeError()
result_files = []
for file_path in local_dir.glob(file_name_pattern):
result_files.append(file_path)
assert result_files, "No result files found"
result_files.sort()
version = int(result_files[-1].name.split("_")[-1])
Shell.check(f"cp {result_files[-1]} {local_path}", strict=True, verbose=True)
return version
@classmethod
def copy_result_to_s3_with_version(cls, result, version):
result.dump()
filename = Path(result.file_name()).name
file_name_versioned = f"{filename}_{str(version).zfill(3)}"
env = _Environment.get()
s3_path_versioned = (
f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name_versioned}"
)
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/"
if version == 0:
S3.clean_s3_directory(s3_path=s3_path)
if not S3.put(
s3_path=s3_path_versioned,
local_path=result.file_name(),
if_none_matched=True,
):
print("Failed to put versioned Result")
return False
if not S3.put(s3_path=s3_path, local_path=result.file_name()):
print("Failed to put non-versioned Result")
return True
# @classmethod
# def lock(cls, s3_path, level=0):
# env = _Environment.get()
# s3_path_lock = s3_path + f".lock"
# file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}"
# assert Shell.check(
# f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True
# ), "Never"
#
# i = 20
# meta = S3.head_object(s3_path_lock)
# while meta:
# locked_by_job = meta.get("Metadata", {"job": ""}).get("job", "")
# if locked_by_job:
# decoded_bytes = base64.b64decode(locked_by_job)
# locked_by_job = decoded_bytes.decode("utf-8")
# print(
# f"WARNING: Failed to acquire lock, meta [{meta}], job [{locked_by_job}] - wait"
# )
# i -= 5
# if i < 0:
# info = f"ERROR: lock acquire failure - unlock forcefully"
# print(info)
# env.add_info(info)
# break
# time.sleep(5)
#
# metadata = {"job": Utils.to_base64(env.JOB_NAME)}
# S3.put(
# s3_path=s3_path_lock,
# local_path=file_path_lock,
# metadata=metadata,
# if_none_matched=True,
# )
# time.sleep(1)
# obj = S3.head_object(s3_path_lock)
# if not obj or not obj.has_tags(tags=metadata):
# print(f"WARNING: locked by another job [{obj}]")
# env.add_info("S3 lock file failure")
# cls.lock(s3_path, level=level + 1)
# print("INFO: lock acquired")
#
# @classmethod
# def unlock(cls, s3_path):
# s3_path_lock = s3_path + ".lock"
# env = _Environment.get()
# obj = S3.head_object(s3_path_lock)
# if not obj:
# print("ERROR: lock file is removed")
# assert False # investigate
# elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}):
# print("ERROR: lock file was acquired by another job")
# assert False # investigate
#
# if not S3.delete(s3_path_lock):
# print(f"ERROR: File [{s3_path_lock}] delete failure")
# print("INFO: lock released")
# return True
@classmethod
def upload_result_files_to_s3(cls, result):
if result.results:
for result_ in result.results:
cls.upload_result_files_to_s3(result_)
for file in result.files:
if not Path(file).is_file():
print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload")
result.info += f"\nWARNING: Result file [{file}] was not found"
file_link = S3._upload_file_to_s3(file, upload_to_s3=False)
else:
is_text = False
for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS:
if file.endswith(text_file_suffix):
print(
f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object"
)
is_text = True
break
file_link = S3._upload_file_to_s3(
file,
upload_to_s3=True,
text=is_text,
s3_subprefix=Utils.normalize_string(result.name),
)
result.links.append(file_link)
if result.files:
print(
f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list"
)
result.files = []
result.dump()
@classmethod
def update_workflow_results(cls, workflow_name, new_info="", new_sub_results=None):
assert new_info or new_sub_results
attempt = 1
prev_status = ""
new_status = ""
done = False
while attempt < 10:
version = cls.copy_result_from_s3_with_version(
Result.file_name_static(workflow_name)
)
workflow_result = Result.from_fs(workflow_name)
prev_status = workflow_result.status
if new_info:
workflow_result.set_info(new_info)
if new_sub_results:
if isinstance(new_sub_results, Result):
new_sub_results = [new_sub_results]
for result_ in new_sub_results:
workflow_result.update_sub_result(result_)
new_status = workflow_result.status
if cls.copy_result_to_s3_with_version(workflow_result, version=version + 1):
done = True
break
print(f"Attempt [{attempt}] to upload workflow result failed")
attempt += 1
assert done
if prev_status != new_status:
return new_status
else:
return None

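update_workflow_results is optimistic concurrency over S3: read the highest result file version, apply the change, and try to write version + 1 with a create-only precondition; if another writer got there first, re-read and retry. The control flow, schematically (read_latest_version, write_version and apply_change are assumed callables):

def update_with_retries(read_latest_version, write_version, apply_change, max_attempts=10):
    for attempt in range(1, max_attempts + 1):
        version, state = read_latest_version()  # highest NNN wins (zfill keeps sort order)
        state = apply_change(state)
        # write_version must fail if version + 1 already exists on S3
        # (put-object with --if-none-match "*")
        if write_version(version + 1, state):
            return state
        print(f"Attempt [{attempt}] to upload workflow result failed - retrying")
    raise RuntimeError("could not update shared workflow result")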
View File

@ -19,7 +19,7 @@ from praktika.utils import Shell, TeePopen, Utils
class Runner:
@staticmethod
def generate_dummy_environment(workflow, job):
def generate_local_run_environment(workflow, job, pr=None, branch=None, sha=None):
print("WARNING: Generate dummy env for local test")
Shell.check(
f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}"
@ -28,9 +28,9 @@ class Runner:
WORKFLOW_NAME=workflow.name,
JOB_NAME=job.name,
REPOSITORY="",
BRANCH="",
SHA="",
PR_NUMBER=-1,
BRANCH=branch or Settings.MAIN_BRANCH if not pr else "",
SHA=sha or Shell.get_output("git rev-parse HEAD"),
PR_NUMBER=pr or -1,
EVENT_TYPE="",
JOB_OUTPUT_STREAM="",
EVENT_FILE_PATH="",
@ -52,6 +52,7 @@ class Runner:
cache_success=[],
cache_success_base64=[],
cache_artifacts={},
cache_jobs={},
)
for docker in workflow.dockers:
workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest(
@ -80,13 +81,12 @@ class Runner:
print("Read GH Environment")
env = _Environment.from_env()
env.JOB_NAME = job.name
env.PARAMETER = job.parameter
env.dump()
print(env)
return 0
def _pre_run(self, workflow, job):
def _pre_run(self, workflow, job, local_run=False):
env = _Environment.get()
result = Result(
@ -96,9 +96,10 @@ class Runner:
)
result.dump()
if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME:
print("Update Job and Workflow Report")
HtmlRunnerHooks.pre_run(workflow, job)
if not local_run:
if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME:
print("Update Job and Workflow Report")
HtmlRunnerHooks.pre_run(workflow, job)
print("Download required artifacts")
required_artifacts = []
@ -123,28 +124,48 @@ class Runner:
return 0
def _run(self, workflow, job, docker="", no_docker=False, param=None):
def _run(self, workflow, job, docker="", no_docker=False, param=None, test=""):
# re-set envs for local run
env = _Environment.get()
env.JOB_NAME = job.name
env.dump()
if param:
if not isinstance(param, str):
Utils.raise_with_error(
f"Custom param for local tests must be of type str, got [{type(param)}]"
)
env = _Environment.get()
env.dump()
if job.run_in_docker and not no_docker:
# TODO: add support for any image, including not from ci config (e.g. ubuntu:latest)
docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[
job.run_in_docker
]
docker = docker or f"{job.run_in_docker}:{docker_tag}"
cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}"
job.run_in_docker, docker_settings = (
job.run_in_docker.split("+")[0],
job.run_in_docker.split("+")[1:],
)
from_root = "root" in docker_settings
settings = [s for s in docker_settings if s.startswith("--")]
if ":" in job.run_in_docker:
docker_name, docker_tag = job.run_in_docker.split(":")
print(
f"WARNING: Job [{job.name}] use custom docker image with a tag - praktika won't control docker version"
)
else:
docker_name, docker_tag = (
job.run_in_docker,
RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker],
)
docker = docker or f"{docker_name}:{docker_tag}"
cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {' '.join(settings)} {docker} {job.command}"
else:
cmd = job.command
python_path = os.getenv("PYTHONPATH", ":")
os.environ["PYTHONPATH"] = f".:{python_path}"
if param:
print(f"Custom --param [{param}] will be passed to job's script")
cmd += f" --param {param}"
if test:
print(f"Custom --test [{test}] will be passed to job's script")
cmd += f" --test {test}"
print(f"--- Run command [{cmd}]")
with TeePopen(cmd, timeout=job.timeout) as process:
@ -219,13 +240,10 @@ class Runner:
print(info)
result.set_info(info).set_status(Result.Status.ERROR).dump()
result.set_files(files=[Settings.RUN_LOG])
if not result.is_ok():
result.set_files(files=[Settings.RUN_LOG])
result.update_duration().dump()
if result.info and result.status != Result.Status.SUCCESS:
# provide job info to workflow level
info_errors.append(result.info)
if run_exit_code == 0:
providing_artifacts = []
if job.provides and workflow.artifacts:
@ -285,14 +303,24 @@ class Runner:
return True
def run(
self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None
self,
workflow,
job,
docker="",
local_run=False,
no_docker=False,
param=None,
test="",
pr=None,
sha=None,
branch=None,
):
res = True
setup_env_code = -10
prerun_code = -10
run_code = -10
if res and not dummy_env:
if res and not local_run:
print(
f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ==="
)
@ -309,13 +337,15 @@ class Runner:
traceback.print_exc()
print(f"=== Setup env finished ===\n\n")
else:
self.generate_dummy_environment(workflow, job)
self.generate_local_run_environment(
workflow, job, pr=pr, branch=branch, sha=sha
)
if res and not dummy_env:
if res and (not local_run or pr or sha or branch):
res = False
print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===")
try:
prerun_code = self._pre_run(workflow, job)
prerun_code = self._pre_run(workflow, job, local_run=local_run)
res = prerun_code == 0
if not res:
print(f"ERROR: Pre-run failed with exit code [{prerun_code}]")
@ -329,7 +359,12 @@ class Runner:
print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===")
try:
run_code = self._run(
workflow, job, docker=docker, no_docker=no_docker, param=param
workflow,
job,
docker=docker,
no_docker=no_docker,
param=param,
test=test,
)
res = run_code == 0
if not res:
@ -339,7 +374,7 @@ class Runner:
traceback.print_exc()
print(f"=== Run scrip finished ===\n\n")
if not dummy_env:
if not local_run:
print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===")
self._post_run(workflow, job, setup_env_code, prerun_code, run_code)
print(f"=== Post run scrip finished ===")

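The run_in_docker value parsed above is of the form image[+root][+--docker-flag...], where the image may carry an explicit :tag; otherwise praktika pins the tag from its own docker digest. A sketch of just that parsing:

def parse_docker_spec(spec, digest_for):
    image, *opts = spec.split("+")
    as_root = "root" in opts
    flags = [o for o in opts if o.startswith("--")]
    if ":" in image:
        name, tag = image.split(":")  # user-pinned tag: not digest-controlled
    else:
        name, tag = image, digest_for[image]
    return f"{name}:{tag}", as_root, flags

print(parse_docker_spec("clickhouse/binary-builder+root+--network=host",
                        {"clickhouse/binary-builder": "abc123"}))
# ('clickhouse/binary-builder:abc123', True, ['--network=host'])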
View File

@ -15,17 +15,23 @@ class RunConfig(MetaClasses.Serializable):
# there might be issues with special characters in job names if used directly in yaml syntax - create a base64-encoded list to avoid this
cache_success_base64: List[str]
cache_artifacts: Dict[str, Cache.CacheRecord]
cache_jobs: Dict[str, Cache.CacheRecord]
sha: str
@classmethod
def from_dict(cls, obj):
cache_artifacts = obj["cache_artifacts"]
cache_jobs = obj["cache_jobs"]
cache_artifacts_deserialized = {}
cache_jobs_deserialized = {}
for artifact_name, cache_artifact in cache_artifacts.items():
cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict(
cache_artifact
)
obj["cache_artifacts"] = cache_artifacts_deserialized
for job_name, cache_job in cache_jobs.items():
cache_jobs_deserialized[job_name] = Cache.CacheRecord.from_dict(cache_job)
obj["cache_jobs"] = cache_jobs_deserialized
return RunConfig(**obj)
@classmethod

View File

@ -1,12 +1,11 @@
import dataclasses
import json
import time
from pathlib import Path
from typing import Dict
from praktika._environment import _Environment
from praktika.settings import Settings
from praktika.utils import Shell, Utils
from praktika.utils import Shell
class S3:
@ -52,23 +51,22 @@ class S3:
cmd += " --content-type text/plain"
res = cls.run_command_with_retries(cmd)
if not res:
raise
raise RuntimeError()
bucket = s3_path.split("/")[0]
endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
assert endpoint
return f"https://{s3_full_path}".replace(bucket, endpoint)
@classmethod
def put(cls, s3_path, local_path, text=False, metadata=None):
def put(cls, s3_path, local_path, text=False, metadata=None, if_none_matched=False):
assert Path(local_path).exists(), f"Path [{local_path}] does not exist"
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
assert Path(
local_path
).is_file(), f"Path [{local_path}] is not file. Only files are supported"
file_name = Path(local_path).name
s3_full_path = s3_path
if not s3_full_path.endswith(file_name):
s3_full_path = f"{s3_path}/{Path(local_path).name}"
if s3_full_path.endswith("/"):
s3_full_path = f"{s3_path}{Path(local_path).name}"
s3_full_path = str(s3_full_path).removeprefix("s3://")
bucket, key = s3_full_path.split("/", maxsplit=1)
@ -76,6 +74,8 @@ class S3:
command = (
f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}"
)
if if_none_matched:
command += ' --if-none-match "*"'
if metadata:
for k, v in metadata.items():
command += f" --metadata {k}={v}"
@ -84,7 +84,7 @@ class S3:
if text:
cmd += " --content-type text/plain"
res = cls.run_command_with_retries(command)
assert res
return res
@classmethod
def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3):
@ -101,6 +101,14 @@ class S3:
elif "does not exist" in stderr:
print("ERROR: requested file does not exist")
break
elif "Unknown options" in stderr:
print("ERROR: Invalid AWS CLI command or CLI client version:")
print(f" | awc error: {stderr}")
break
elif "PreconditionFailed" in stderr:
print("ERROR: AWS API Call Precondition Failed")
print(f" | awc error: {stderr}")
break
if ret_code != 0:
print(
f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]"
@ -108,13 +116,6 @@ class S3:
res = ret_code == 0
return res
@classmethod
def get_link(cls, s3_path, local_path):
s3_full_path = f"{s3_path}/{Path(local_path).name}"
bucket = s3_path.split("/")[0]
endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket]
return f"https://{s3_full_path}".replace(bucket, endpoint)
@classmethod
def copy_file_from_s3(cls, s3_path, local_path):
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
@ -128,6 +129,19 @@ class S3:
res = cls.run_command_with_retries(cmd)
return res
@classmethod
def copy_file_from_s3_matching_pattern(
cls, s3_path, local_path, include, exclude="*"
):
assert Path(s3_path), f"Invalid S3 Path [{s3_path}]"
assert Path(
local_path
).is_dir(), f"Path [{local_path}] does not exist or not a directory"
assert s3_path.endswith("/"), f"s3 path is invalid [{s3_path}]"
cmd = f'aws s3 cp s3://{s3_path} {local_path} --exclude "{exclude}" --include "{include}" --recursive'
res = cls.run_command_with_retries(cmd)
return res
@classmethod
def head_object(cls, s3_path):
s3_path = str(s3_path).removeprefix("s3://")
@ -148,103 +162,6 @@ class S3:
verbose=True,
)
# TODO: apparently should be placed into separate file to be used only inside praktika
# keeping this module clean from importing Settings, Environment and etc, making it easy for use externally
@classmethod
def copy_result_to_s3(cls, result, unlock=True):
result.dump()
env = _Environment.get()
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
s3_path_full = f"{s3_path}/{Path(result.file_name()).name}"
url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
if env.PR_NUMBER:
print("Duplicate Result for latest commit alias in PR")
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}"
url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name())
if unlock:
if not cls.unlock(s3_path_full):
print(f"ERROR: File [{s3_path_full}] unlock failure")
assert False # TODO: investigate
return url
@classmethod
def copy_result_from_s3(cls, local_path, lock=True):
env = _Environment.get()
file_name = Path(local_path).name
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}"
if lock:
cls.lock(s3_path)
if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path):
print(f"ERROR: failed to cp file [{s3_path}] from s3")
raise
@classmethod
def lock(cls, s3_path, level=0):
assert level < 3, "Never"
env = _Environment.get()
s3_path_lock = s3_path + f".lock"
file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}"
assert Shell.check(
f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True
), "Never"
i = 20
meta = S3.head_object(s3_path_lock)
while meta:
print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait")
i -= 5
if i < 0:
info = f"ERROR: lock acquire failure - unlock forcefully"
print(info)
env.add_info(info)
break
time.sleep(5)
metadata = {"job": Utils.to_base64(env.JOB_NAME)}
S3.put(
s3_path=s3_path_lock,
local_path=file_path_lock,
metadata=metadata,
)
time.sleep(1)
obj = S3.head_object(s3_path_lock)
if not obj or not obj.has_tags(tags=metadata):
print(f"WARNING: locked by another job [{obj}]")
env.add_info("S3 lock file failure")
cls.lock(s3_path, level=level + 1)
print("INFO: lock acquired")
@classmethod
def unlock(cls, s3_path):
s3_path_lock = s3_path + ".lock"
env = _Environment.get()
obj = S3.head_object(s3_path_lock)
if not obj:
print("ERROR: lock file is removed")
assert False # investigate
elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}):
print("ERROR: lock file was acquired by another job")
assert False # investigate
if not S3.delete(s3_path_lock):
print(f"ERROR: File [{s3_path_lock}] delete failure")
print("INFO: lock released")
return True
@classmethod
def get_result_link(cls, result):
env = _Environment.get()
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}"
return S3.get_link(s3_path=s3_path, local_path=result.file_name())
@classmethod
def clean_latest_result(cls):
env = _Environment.get()
env.SHA = "latest"
assert env.PR_NUMBER
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}"
S3.clean_s3_directory(s3_path=s3_path)
@classmethod
def _upload_file_to_s3(
cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix=""
@ -260,36 +177,3 @@ class S3:
)
return html_link
return f"file://{Path(local_file_path).absolute()}"
@classmethod
def upload_result_files_to_s3(cls, result):
if result.results:
for result_ in result.results:
cls.upload_result_files_to_s3(result_)
for file in result.files:
if not Path(file).is_file():
print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload")
result.info += f"\nWARNING: Result file [{file}] was not found"
file_link = cls._upload_file_to_s3(file, upload_to_s3=False)
else:
is_text = False
for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS:
if file.endswith(text_file_suffix):
print(
f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object"
)
is_text = True
break
file_link = cls._upload_file_to_s3(
file,
upload_to_s3=True,
text=is_text,
s3_subprefix=Utils.normalize_string(result.name),
)
result.links.append(file_link)
if result.files:
print(
f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list"
)
result.files = []
result.dump()

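The --if-none-match "*" flag added to put-object makes the write succeed only if the key does not exist yet, which is what the versioned-result writes rely on. The same semantics in boto3, as a sketch (assuming a recent botocore with S3 conditional-write support; PreconditionFailed corresponds to HTTP 412):

import boto3
import botocore.exceptions

def put_if_absent(bucket, key, body):
    s3 = boto3.client("s3")
    try:
        # IfNoneMatch="*" asks S3 to reject the write if the key already exists
        s3.put_object(Bucket=bucket, Key=key, Body=body, IfNoneMatch="*")
        return True
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "PreconditionFailed":
            return False  # another writer created the key first
        raise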
View File

@ -1,8 +1,152 @@
from praktika._settings import _Settings
from praktika.mangle import _get_user_settings
import dataclasses
import importlib.util
from pathlib import Path
from typing import Dict, Iterable, List, Optional
Settings = _Settings()
user_settings = _get_user_settings()
for setting, value in user_settings.items():
Settings.__setattr__(setting, value)
@dataclasses.dataclass
class _Settings:
######################################
# Pipeline generation settings #
######################################
MAIN_BRANCH = "main"
CI_PATH = "./ci"
WORKFLOW_PATH_PREFIX: str = "./.github/workflows"
WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows"
SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings"
CI_CONFIG_JOB_NAME = "Config Workflow"
DOCKER_BUILD_JOB_NAME = "Docker Builds"
FINISH_WORKFLOW_JOB_NAME = "Finish Workflow"
READY_FOR_MERGE_STATUS_NAME = "Ready for Merge"
CI_CONFIG_RUNS_ON: Optional[List[str]] = None
DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None
VALIDATE_FILE_PATHS: bool = True
######################################
# Runtime Settings #
######################################
MAX_RETRIES_S3 = 3
MAX_RETRIES_GH = 3
######################################
# S3 (artifact storage) settings #
######################################
S3_ARTIFACT_PATH: str = ""
######################################
# CI workspace settings #
######################################
TEMP_DIR: str = "/tmp/praktika"
OUTPUT_DIR: str = f"{TEMP_DIR}/output"
INPUT_DIR: str = f"{TEMP_DIR}/input"
PYTHON_INTERPRETER: str = "python3"
PYTHON_PACKET_MANAGER: str = "pip3"
PYTHON_VERSION: str = "3.9"
INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False
INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt"
ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json"
RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log"
SECRET_GH_APP_ID: str = "GH_APP_ID"
SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY"
ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh"
WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json"
######################################
# CI Cache settings #
######################################
CACHE_VERSION: int = 1
CACHE_DIGEST_LEN: int = 20
CACHE_S3_PATH: str = ""
CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache"
######################################
# Report settings #
######################################
HTML_S3_PATH: str = ""
HTML_PAGE_FILE: str = "./praktika/json.html"
TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"])
S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None
DOCKERHUB_USERNAME: str = ""
DOCKERHUB_SECRET: str = ""
DOCKER_WD: str = "/wd"
######################################
# CI DB Settings #
######################################
SECRET_CI_DB_URL: str = "CI_DB_URL"
SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD"
CI_DB_DB_NAME = ""
CI_DB_TABLE_NAME = ""
CI_DB_INSERT_TIMEOUT_SEC = 5
DISABLE_MERGE_COMMIT = True
_USER_DEFINED_SETTINGS = [
"S3_ARTIFACT_PATH",
"CACHE_S3_PATH",
"HTML_S3_PATH",
"S3_BUCKET_TO_HTTP_ENDPOINT",
"TEXT_CONTENT_EXTENSIONS",
"TEMP_DIR",
"OUTPUT_DIR",
"INPUT_DIR",
"CI_CONFIG_RUNS_ON",
"DOCKER_BUILD_RUNS_ON",
"CI_CONFIG_JOB_NAME",
"PYTHON_INTERPRETER",
"PYTHON_VERSION",
"PYTHON_PACKET_MANAGER",
"INSTALL_PYTHON_FOR_NATIVE_JOBS",
"INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS",
"MAX_RETRIES_S3",
"MAX_RETRIES_GH",
"VALIDATE_FILE_PATHS",
"DOCKERHUB_USERNAME",
"DOCKERHUB_SECRET",
"READY_FOR_MERGE_STATUS_NAME",
"SECRET_CI_DB_URL",
"SECRET_CI_DB_PASSWORD",
"CI_DB_DB_NAME",
"CI_DB_TABLE_NAME",
"CI_DB_INSERT_TIMEOUT_SEC",
"SECRET_GH_APP_PEM_KEY",
"SECRET_GH_APP_ID",
"MAIN_BRANCH",
"DISABLE_MERGE_COMMIT",
]
def _get_settings() -> _Settings:
res = _Settings()
directory = Path(_Settings.SETTINGS_DIRECTORY)
for py_file in directory.glob("*.py"):
module_name = py_file.name.removesuffix(".py")
spec = importlib.util.spec_from_file_location(
module_name, f"{_Settings.SETTINGS_DIRECTORY}/{py_file.name}"
)
assert spec
user_module = importlib.util.module_from_spec(spec)
assert spec.loader
spec.loader.exec_module(user_module)
for setting in _USER_DEFINED_SETTINGS:
try:
value = getattr(user_module, setting)
setattr(res, setting, value)
except AttributeError:
# setting not defined in this user settings module - keep the default
pass
return res
class GHRunners:
ubuntu = "ubuntu-latest"
Settings = _get_settings()
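The loader above means a repo only overrides what it lists: `_get_settings()` imports every `*.py` under `SETTINGS_DIRECTORY` and copies across only the module-level names that appear in `_USER_DEFINED_SETTINGS`. A minimal sketch of such a settings module (file name and values are hypothetical):

# ci/settings/my_settings.py - hypothetical override module picked up by _get_settings()
S3_ARTIFACT_PATH = "my-bucket/artifacts"   # overrides the empty default
MAX_RETRIES_S3 = 5                         # overrides the default of 3
CI_CONFIG_RUNS_ON = ["ci_services"]        # overrides None
NOT_A_KNOWN_SETTING = "ignored"            # not in _USER_DEFINED_SETTINGS, silently skipped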

View File

@ -17,8 +17,6 @@ from threading import Thread
from types import SimpleNamespace
from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union
from praktika._settings import _Settings
T = TypeVar("T", bound="Serializable")
@ -81,25 +79,26 @@ class MetaClasses:
class ContextManager:
@staticmethod
@contextmanager
def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]:
def cd(to: Optional[Union[Path, str]]) -> Iterator[None]:
"""
changes the current working directory to @to; if @to is falsy, the directory is left unchanged
:param to: target directory
:return:
"""
if not to:
try:
to = Shell.get_output_or_raise("git rev-parse --show-toplevel")
except:
pass
if not to:
if Path(_Settings.DOCKER_WD).is_dir():
to = _Settings.DOCKER_WD
if not to:
assert False, "FIX IT"
assert to
# if not to:
# try:
# to = Shell.get_output_or_raise("git rev-parse --show-toplevel")
# except:
# pass
# if not to:
# if Path(_Settings.DOCKER_WD).is_dir():
# to = _Settings.DOCKER_WD
# if not to:
# assert False, "FIX IT"
# assert to
old_pwd = os.getcwd()
os.chdir(to)
if to:
os.chdir(to)
try:
yield
finally:

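The net effect of this hunk is a stricter contract: `cd` no longer guesses a default directory (git root or `DOCKER_WD`), and a falsy `to` now means "do not change directory at all". A minimal usage sketch of the new behavior (paths are hypothetical):

from praktika.utils import ContextManager

with ContextManager.cd("/tmp/praktika"):
    ...  # runs with cwd == /tmp/praktika, restored on exit

with ContextManager.cd(None):
    ...  # cwd is left untouched; the context manager is effectively a no-op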
View File

@ -4,10 +4,8 @@ from itertools import chain
from pathlib import Path
from praktika import Workflow
from praktika._settings import GHRunners
from praktika.mangle import _get_workflows
from praktika.settings import Settings
from praktika.utils import ContextManager
from praktika.settings import GHRunners, Settings
class Validator:
@ -119,61 +117,56 @@ class Validator:
def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None:
if not Settings.VALIDATE_FILE_PATHS:
return
with ContextManager.cd():
for job in workflow.jobs:
run_command = job.command
command_parts = run_command.split(" ")
for part in command_parts:
if ">" in part:
return
if "/" in part:
assert (
Path(part).is_file() or Path(part).is_dir()
), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS"
for job in workflow.jobs:
run_command = job.command
command_parts = run_command.split(" ")
for part in command_parts:
if ">" in part:
return
if "/" in part:
assert (
Path(part).is_file() or Path(part).is_dir()
), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS"
@classmethod
def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None:
if not Settings.VALIDATE_FILE_PATHS:
return
with ContextManager.cd():
for job in workflow.jobs:
if not job.digest_config:
continue
for include_path in chain(
job.digest_config.include_paths, job.digest_config.exclude_paths
):
if "*" in include_path:
assert glob.glob(
include_path, recursive=True
), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
else:
assert (
Path(include_path).is_file() or Path(include_path).is_dir()
), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
for job in workflow.jobs:
if not job.digest_config:
continue
for include_path in chain(
job.digest_config.include_paths, job.digest_config.exclude_paths
):
if "*" in include_path:
assert glob.glob(
include_path, recursive=True
), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
else:
assert (
Path(include_path).is_file() or Path(include_path).is_dir()
), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS"
@classmethod
def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None:
with ContextManager.cd():
for job in workflow.jobs:
if job.job_requirements:
if job.job_requirements.python_requirements_txt:
path = Path(job.job_requirements.python_requirements_txt)
message = f"File with py requirement [{path}] does not exist"
if job.name in (
Settings.DOCKER_BUILD_JOB_NAME,
Settings.CI_CONFIG_JOB_NAME,
Settings.FINISH_WORKFLOW_JOB_NAME,
):
message += '\n If all requirements are already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS=""'
message += "\n If requirements need to be installed - add a requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):"
message += "\n echo jwt==1.3.1 > ./ci/requirements.txt"
message += (
"\n echo requests==2.32.3 >> ./ci/requirements.txt"
)
message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt"
cls.evaluate_check(
path.is_file(), message, job.name, workflow.name
for job in workflow.jobs:
if job.job_requirements:
if job.job_requirements.python_requirements_txt:
path = Path(job.job_requirements.python_requirements_txt)
message = f"File with py requirement [{path}] does not exist"
if job.name in (
Settings.DOCKER_BUILD_JOB_NAME,
Settings.CI_CONFIG_JOB_NAME,
Settings.FINISH_WORKFLOW_JOB_NAME,
):
message += '\n If all requirements are already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS=""'
message += "\n If requirements need to be installed - add a requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):"
message += "\n echo jwt==1.3.1 > ./ci/requirements.txt"
message += (
"\n echo requests==2.32.3 >> ./ci/requirements.txt"
)
message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt"
cls.evaluate_check(path.is_file(), message, job.name, workflow.name)
@classmethod
def validate_dockers(cls, workflow: Workflow.Config):

View File

@ -31,6 +31,7 @@ class Workflow:
enable_report: bool = False
enable_merge_ready_status: bool = False
enable_cidb: bool = False
enable_merge_commit: bool = False
def is_event_pull_request(self):
return self.event == Workflow.Event.PULL_REQUEST

View File

@ -80,6 +80,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{{{ github.head_ref }}}}
{JOB_ADDONS}
- name: Prepare env script
run: |
@ -102,7 +104,11 @@ jobs:
run: |
. /tmp/praktika_setup_env.sh
set -o pipefail
{PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG}
if command -v ts &> /dev/null; then
python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log
else
python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee /tmp/praktika/praktika_run.log
fi
{UPLOADS_GITHUB}\
"""
@ -184,12 +190,10 @@ jobs:
False
), f"Workflow event not yet supported [{workflow_config.event}]"
with ContextManager.cd():
with open(self._get_workflow_file_name(workflow_config.name), "w") as f:
f.write(yaml_workflow_str)
with open(self._get_workflow_file_name(workflow_config.name), "w") as f:
f.write(yaml_workflow_str)
with ContextManager.cd():
Shell.check("git add ./.github/workflows/*.yaml")
Shell.check("git add ./.github/workflows/*.yaml")
class PullRequestPushYamlGen:

View File

@ -7,24 +7,33 @@ S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com"
class RunnerLabels:
CI_SERVICES = "ci_services"
CI_SERVICES_EBS = "ci_services_ebs"
BUILDER = "builder"
BUILDER_AMD = "builder"
BUILDER_ARM = "builder-aarch64"
FUNC_TESTER_AMD = "func-tester"
FUNC_TESTER_ARM = "func-tester-aarch64"
BASE_BRANCH = "master"
azure_secret = Secret.Config(
name="azure_connection_string",
type=Secret.Type.AWS_SSM_VAR,
)
SECRETS = [
Secret.Config(
name="dockerhub_robot_password",
type=Secret.Type.AWS_SSM_VAR,
),
Secret.Config(
name="woolenwolf_gh_app.clickhouse-app-id",
type=Secret.Type.AWS_SSM_SECRET,
),
Secret.Config(
name="woolenwolf_gh_app.clickhouse-app-key",
type=Secret.Type.AWS_SSM_SECRET,
),
azure_secret,
# Secret.Config(
# name="woolenwolf_gh_app.clickhouse-app-id",
# type=Secret.Type.AWS_SSM_SECRET,
# ),
# Secret.Config(
# name="woolenwolf_gh_app.clickhouse-app-key",
# type=Secret.Type.AWS_SSM_SECRET,
# ),
]
DOCKERS = [
@ -118,18 +127,18 @@ DOCKERS = [
# platforms=Docker.Platforms.arm_amd,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/stateless-test",
# path="./ci/docker/test/stateless",
# platforms=Docker.Platforms.arm_amd,
# depends_on=["clickhouse/test-base"],
# ),
# Docker.Config(
# name="clickhouse/stateful-test",
# path="./ci/docker/test/stateful",
# platforms=Docker.Platforms.arm_amd,
# depends_on=["clickhouse/stateless-test"],
# ),
Docker.Config(
name="clickhouse/stateless-test",
path="./ci/docker/stateless-test",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
name="clickhouse/stateful-test",
path="./ci/docker/stateful-test",
platforms=Docker.Platforms.arm_amd,
depends_on=["clickhouse/stateless-test"],
),
# Docker.Config(
# name="clickhouse/stress-test",
# path="./ci/docker/test/stress",
@ -230,4 +239,6 @@ DOCKERS = [
class JobNames:
STYLE_CHECK = "Style Check"
FAST_TEST = "Fast test"
BUILD_AMD_DEBUG = "Build amd64 debug"
BUILD = "Build"
STATELESS = "Stateless tests"
STATEFUL = "Stateful tests"

View File

@ -4,6 +4,8 @@ from ci.settings.definitions import (
RunnerLabels,
)
MAIN_BRANCH = "master"
S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts"
CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES]
DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS]

View File

@ -1,5 +1,3 @@
from typing import List
from praktika import Artifact, Job, Workflow
from praktika.settings import Settings
@ -13,7 +11,10 @@ from ci.settings.definitions import (
class ArtifactNames:
ch_debug_binary = "clickhouse_debug_binary"
CH_AMD_DEBUG = "CH_AMD_DEBUG"
CH_AMD_RELEASE = "CH_AMD_RELEASE"
CH_ARM_RELEASE = "CH_ARM_RELEASE"
CH_ARM_ASAN = "CH_ARM_ASAN"
style_check_job = Job.Config(
@ -25,7 +26,7 @@ style_check_job = Job.Config(
fast_test_job = Job.Config(
name=JobNames.FAST_TEST,
runs_on=[RunnerLabels.BUILDER],
runs_on=[RunnerLabels.BUILDER_AMD],
command="python3 ./ci/jobs/fast_test.py",
run_in_docker="clickhouse/fasttest",
digest_config=Job.CacheDigestConfig(
@ -37,11 +38,13 @@ fast_test_job = Job.Config(
),
)
job_build_amd_debug = Job.Config(
name=JobNames.BUILD_AMD_DEBUG,
runs_on=[RunnerLabels.BUILDER],
command="python3 ./ci/jobs/build_clickhouse.py amd_debug",
build_jobs = Job.Config(
name=JobNames.BUILD,
runs_on=["...from params..."],
requires=[JobNames.FAST_TEST],
command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}",
run_in_docker="clickhouse/fasttest",
timeout=3600 * 2,
digest_config=Job.CacheDigestConfig(
include_paths=[
"./src",
@ -54,9 +57,85 @@ job_build_amd_debug = Job.Config(
"./docker/packager/packager",
"./rust",
"./tests/ci/version_helper.py",
"./ci/jobs/build_clickhouse.py",
],
),
provides=[ArtifactNames.ch_debug_binary],
).parametrize(
parameter=["amd_debug", "amd_release", "arm_release", "arm_asan"],
provides=[
[ArtifactNames.CH_AMD_DEBUG],
[ArtifactNames.CH_AMD_RELEASE],
[ArtifactNames.CH_ARM_RELEASE],
[ArtifactNames.CH_ARM_ASAN],
],
runs_on=[
[RunnerLabels.BUILDER_AMD],
[RunnerLabels.BUILDER_AMD],
[RunnerLabels.BUILDER_ARM],
[RunnerLabels.BUILDER_ARM],
],
)
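`parametrize` fans the single `Job.Config` template above out into four concrete build jobs, zipping each `parameter` value with the matching `provides` and `runs_on` entries and substituting it into the `{PARAMETER}` placeholder of `command`. Roughly equivalent, as a simplified sketch (not praktika's actual implementation; the job-naming scheme and deep-copy approach are assumptions):

import copy

def parametrize_sketch(template, parameter, provides, runs_on):
    jobs = []
    for param, prov, runners in zip(parameter, provides, runs_on):
        job = copy.deepcopy(template)
        job.name = f"{template.name} ({param})"            # e.g. "Build (amd_debug)" - naming is assumed
        job.command = template.command.format(PARAMETER=param)
        job.provides = prov
        job.runs_on = runners
        jobs.append(job)
    return jobs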
stateless_tests_jobs = Job.Config(
name=JobNames.STATELESS,
runs_on=[RunnerLabels.BUILDER_AMD],
command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}",
# many tests expect to see "/var/lib/clickhouse" in various output lines - add a mount for now; consider creating this dir in the Dockerfile
run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/functional_stateless_tests.py",
],
),
).parametrize(
parameter=[
"amd_debug,parallel",
"amd_debug,non-parallel",
"amd_release,parallel",
"amd_release,non-parallel",
"arm_asan,parallel",
"arm_asan,non-parallel",
],
runs_on=[
[RunnerLabels.BUILDER_AMD],
[RunnerLabels.FUNC_TESTER_AMD],
[RunnerLabels.BUILDER_AMD],
[RunnerLabels.FUNC_TESTER_AMD],
[RunnerLabels.BUILDER_ARM],
[RunnerLabels.FUNC_TESTER_ARM],
],
requires=[
[ArtifactNames.CH_AMD_DEBUG],
[ArtifactNames.CH_AMD_DEBUG],
[ArtifactNames.CH_AMD_RELEASE],
[ArtifactNames.CH_AMD_RELEASE],
[ArtifactNames.CH_ARM_ASAN],
[ArtifactNames.CH_ARM_ASAN],
],
)
stateful_tests_jobs = Job.Config(
name=JobNames.STATEFUL,
runs_on=[RunnerLabels.BUILDER_AMD],
command="python3 ./ci/jobs/functional_stateful_tests.py --test-options {PARAMETER}",
# many tests expect to see "/var/lib/clickhouse"
# some tests expect to see "/var/log/clickhouse"
run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/functional_stateful_tests.py",
],
),
).parametrize(
parameter=[
"amd_debug,parallel",
],
runs_on=[
[RunnerLabels.BUILDER_AMD],
],
requires=[
[ArtifactNames.CH_AMD_DEBUG],
],
)
workflow = Workflow.Config(
@ -66,14 +145,31 @@ workflow = Workflow.Config(
jobs=[
style_check_job,
fast_test_job,
job_build_amd_debug,
*build_jobs,
*stateless_tests_jobs,
*stateful_tests_jobs,
],
artifacts=[
Artifact.Config(
name=ArtifactNames.ch_debug_binary,
name=ArtifactNames.CH_AMD_DEBUG,
type=Artifact.Type.S3,
path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
)
),
Artifact.Config(
name=ArtifactNames.CH_AMD_RELEASE,
type=Artifact.Type.S3,
path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
),
Artifact.Config(
name=ArtifactNames.CH_ARM_RELEASE,
type=Artifact.Type.S3,
path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
),
Artifact.Config(
name=ArtifactNames.CH_ARM_ASAN,
type=Artifact.Type.S3,
path=f"{Settings.TEMP_DIR}/build/programs/clickhouse",
),
],
dockers=DOCKERS,
secrets=SECRETS,
@ -84,11 +180,14 @@ workflow = Workflow.Config(
WORKFLOWS = [
workflow,
] # type: List[Workflow.Config]
]
if __name__ == "__main__":
# local job test inside praktika environment
from praktika.runner import Runner
Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True)
# if __name__ == "__main__":
# # local job test inside praktika environment
# from praktika.runner import Runner
# from praktika.digest import Digest
#
# print(Digest().calc_job_digest(amd_debug_build_job))
#
# Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True)

View File

@ -2,11 +2,11 @@
# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54492)
SET(VERSION_REVISION 54493)
SET(VERSION_MAJOR 24)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH c82cf25b3e5864bcc153cbe45adb8c6527e1ec6e)
SET(VERSION_DESCRIBE v24.11.1.1-testing)
SET(VERSION_STRING 24.11.1.1)
SET(VERSION_GITHASH e4c9b022237992620c966d032cee495da8d0b5ac)
SET(VERSION_DESCRIBE v24.12.1.1-testing)
SET(VERSION_STRING 24.12.1.1)
# end of autochange

View File

@ -5,14 +5,14 @@ if (ENABLE_CLANG_TIDY)
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (_CLANG_TIDY_PATH NAMES "clang-tidy-19" "clang-tidy-18" "clang-tidy-17" "clang-tidy")
# Why do we use ';' here?
# It's a cmake black magic: https://cmake.org/cmake/help/latest/prop_tgt/LANG_CLANG_TIDY.html#prop_tgt:%3CLANG%3E_CLANG_TIDY
# The CLANG_TIDY_PATH is passed to CMAKE_CXX_CLANG_TIDY, which follows CXX_CLANG_TIDY syntax.
set (CLANG_TIDY_PATH "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_PATH}" CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-18" "clang-tidy-17" "clang-tidy-16" "clang-tidy")
find_program (CLANG_TIDY_PATH NAMES "clang-tidy-19" "clang-tidy-18" "clang-tidy-17" "clang-tidy")
endif ()
if (CLANG_TIDY_PATH)

View File

@ -74,6 +74,7 @@ elseif (ARCH_AARCH64)
# introduced as optional, either in v8.2 [7] or in v8.4 [8].
# rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
# Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
# bf16: Bfloat16, a half-precision floating point format developed by Google Brain. Optional in v8.2, mandatory in v8.6.
#
# [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
# [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
@ -85,7 +86,7 @@ elseif (ARCH_AARCH64)
# [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions-
# [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en
# [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16")
endif ()
# Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. If we build on ARM for ARM

View File

@ -3,8 +3,7 @@
set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
# We need builtins from Clang
execute_process (COMMAND
${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
OUTPUT_VARIABLE BUILTINS_LIBRARY

View File

@ -17,9 +17,4 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")
# Currently, lld does not work with the error:
# ld.lld: error: section size decrease is too large
# But GNU BinUtils work.
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld")

View File

@ -217,7 +217,11 @@ add_contrib (libssh-cmake libssh)
add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)
add_contrib(numactl-cmake numactl)
add_contrib (numactl-cmake numactl)
add_contrib (google-cloud-cpp-cmake google-cloud-cpp) # requires grpc, protobuf, absl
add_contrib (jwt-cpp-cmake jwt-cpp)
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear

contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21
Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3

View File

@ -57,6 +57,7 @@ endif()
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/src/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/generated/src/aws-cpp-sdk-s3")
SET(AWS_SDK_KMS_DIR "${AWS_SDK_DIR}/generated/src/aws-cpp-sdk-kms")
SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
@ -145,6 +146,17 @@ list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")
if(CLICKHOUSE_CLOUD)
# aws-cpp-sdk-kms
file(GLOB AWS_SDK_KMS_SRC
"${AWS_SDK_KMS_DIR}/source/*.cpp"
"${AWS_SDK_KMS_DIR}/source/model/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_SDK_KMS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_KMS_DIR}/include/")
endif()
# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"

contrib/google-cloud-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit 83f30caadb8613fb5c408d8c2fd545291596b53f

View File

@ -0,0 +1,105 @@
set(ENABLE_GOOGLE_CLOUD_CPP_DEFAULT OFF)
if(ENABLE_LIBRARIES AND CLICKHOUSE_CLOUD AND OS_LINUX)
set(ENABLE_GOOGLE_CLOUD_CPP_DEFAULT ON)
endif()
option(ENABLE_GOOGLE_CLOUD_CPP "Enable Google Cloud Cpp" ${ENABLE_GOOGLE_CLOUD_CPP_DEFAULT})
if(NOT ENABLE_GOOGLE_CLOUD_CPP)
message(STATUS "Not using Google Cloud Cpp")
return()
endif()
if(NOT ENABLE_GRPC)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use Google Cloud Cpp without gRPC")
endif()
if (NOT ENABLE_PROTOBUF)
message( ${RECONFIGURE_MESSAGE_LEVEL} "Can't use Google Cloud Cpp without protobuf")
endif()
# Gather sources and options.
set(GOOGLE_CLOUD_CPP_SOURCES)
set(GOOGLE_CLOUD_CPP_PUBLIC_INCLUDES)
set(GOOGLE_CLOUD_CPP_PRIVATE_INCLUDES)
set(GOOGLE_CLOUD_CPP_PRIVATE_LIBS)
# Directories.
SET(GOOGLE_CLOUD_CPP_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-cloud-cpp" )
list(APPEND GOOGLE_CLOUD_CPP_PRIVATE_INCLUDES "${GOOGLE_CLOUD_CPP_DIR}")
# Set the PROJECT_SOURCE_DIR so that all Google Cloud cmake files work
set(PROJECT_SOURCE_DIR_BAK ${PROJECT_SOURCE_DIR})
set(PROJECT_SOURCE_DIR ${GOOGLE_CLOUD_CPP_DIR})
list(APPEND CMAKE_MODULE_PATH "${GOOGLE_CLOUD_CPP_DIR}/cmake")
# Building this target results in all protobufs being compiled.
add_custom_target(google-cloud-cpp-protos)
include("GoogleCloudCppLibrary")
# Set some variables required for googleapis CMakeLists.txt to work.
set(GOOGLE_CLOUD_CPP_ENABLE_GRPC ON)
set(PROJECT_VERSION "1")
set(PROJECT_VERSION_MAJOR "1")
set(PROTO_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
set(GOOGLE_CLOUD_CPP_GRPC_PLUGIN_EXECUTABLE $<TARGET_FILE:grpc_cpp_plugin>)
include(GoogleApis.cmake)
add_library(gRPC::grpc++ ALIAS _ch_contrib_grpc)
add_library(gRPC::grpc ALIAS _ch_contrib_grpc)
# google-cloud-cpp-kms.
google_cloud_cpp_add_library_protos(kms)
include(google_cloud_cpp_common.cmake)
include(google_cloud_cpp_grpc_utils.cmake)
SET(GOOGLE_CLOUD_CPP_KMS_DIR "${GOOGLE_CLOUD_CPP_DIR}/google/cloud/kms")
file(GLOB GOOGLE_CLOUD_CPP_KMS_SRC
"${GOOGLE_CLOUD_CPP_KMS_DIR}/v1/*.cc"
"${GOOGLE_CLOUD_CPP_KMS_DIR}/v1/internal/*.cc"
"${GOOGLE_CLOUD_CPP_KMS_DIR}/inventory/v1/*.cc"
)
list(APPEND GOOGLE_CLOUD_CPP_SOURCES ${GOOGLE_CLOUD_CPP_KMS_SRC})
list(APPEND GOOGLE_CLOUD_CPP_PUBLIC_INCLUDES "${GOOGLE_CLOUD_CPP_DIR}" "${CMAKE_CURRENT_BINARY_DIR}")
set(GRPC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
list(APPEND GOOGLE_CLOUD_CPP_PUBLIC_INCLUDES "${GRPC_INCLUDE_DIR}/include" "${GRPC_INCLUDE_DIR}/spm-cpp-include")
# Restore the PROJECT_SOURCE_DIR.
set(PROJECT_SOURCE_DIR ${PROJECT_SOURCE_DIR_BAK})
# Link against external libraries.
list(APPEND GOOGLE_CLOUD_CPP_PRIVATE_LIBS
google_cloud_cpp_common
google_cloud_cpp_grpc_utils
google_cloud_cpp_kms_protos
google_cloud_cpp_cloud_location_locations_protos
google_cloud_cpp_iam_v1_iam_policy_protos
gRPC::grpc++
absl::optional
)
list(APPEND GOOGLE_CLOUD_CPP_PUBLIC_LIBS
absl::optional
gRPC::grpc++
)
# Add library.
add_library(_gcloud ${GOOGLE_CLOUD_CPP_SOURCES})
target_include_directories(_gcloud SYSTEM PUBLIC ${GOOGLE_CLOUD_CPP_PUBLIC_INCLUDES})
target_include_directories(_gcloud SYSTEM PRIVATE ${GOOGLE_CLOUD_CPP_PRIVATE_INCLUDES})
target_link_libraries(_gcloud PRIVATE ${GOOGLE_CLOUD_CPP_PRIVATE_LIBS})
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options(_gcloud PRIVATE -g0)
endif()
add_library(ch_contrib::google_cloud_cpp ALIAS _gcloud)

View File

@ -0,0 +1,469 @@
# ~~~
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ~~~
# File copied from google-cloud-cpp/external/googleapis/CMakeLists.txt with minor modifications.
if (NOT GOOGLE_CLOUD_CPP_ENABLE_GRPC)
return()
endif ()
include(GoogleapisConfig)
set(GOOGLE_CLOUD_CPP_GOOGLEAPIS_URL
"https://github.com/googleapis/googleapis/archive/${_GOOGLE_CLOUD_CPP_GOOGLEAPIS_COMMIT_SHA}.tar.gz"
"https://storage.googleapis.com/cloud-cpp-community-archive/github.com/googleapis/googleapis/archive/${_GOOGLE_CLOUD_CPP_GOOGLEAPIS_COMMIT_SHA}.tar.gz"
)
set(GOOGLE_CLOUD_CPP_GOOGLEAPIS_URL_HASH
"${_GOOGLE_CLOUD_CPP_GOOGLEAPIS_SHA256}")
if (GOOGLE_CLOUD_CPP_OVERRIDE_GOOGLEAPIS_URL)
set(GOOGLE_CLOUD_CPP_GOOGLEAPIS_URL
${GOOGLE_CLOUD_CPP_OVERRIDE_GOOGLEAPIS_URL})
endif ()
if (GOOGLE_CLOUD_CPP_OVERRIDE_GOOGLEAPIS_URL_HASH)
set(GOOGLE_CLOUD_CPP_GOOGLEAPIS_URL_HASH
"${GOOGLE_CLOUD_CPP_OVERRIDE_GOOGLEAPIS_URL_HASH}")
endif ()
set(EXTERNAL_GOOGLEAPIS_PROTO_FILES
# cmake-format: sort
"google/api/annotations.proto"
"google/api/auth.proto"
"google/api/backend.proto"
"google/api/billing.proto"
"google/api/client.proto"
"google/api/config_change.proto"
"google/api/consumer.proto"
"google/api/context.proto"
"google/api/control.proto"
"google/api/distribution.proto"
"google/api/documentation.proto"
"google/api/endpoint.proto"
"google/api/error_reason.proto"
"google/api/field_behavior.proto"
"google/api/field_info.proto"
"google/api/http.proto"
"google/api/httpbody.proto"
"google/api/label.proto"
"google/api/launch_stage.proto"
"google/api/log.proto"
"google/api/logging.proto"
"google/api/metric.proto"
"google/api/monitored_resource.proto"
"google/api/monitoring.proto"
"google/api/policy.proto"
"google/api/quota.proto"
"google/api/resource.proto"
"google/api/routing.proto"
"google/api/service.proto"
"google/api/source_info.proto"
"google/api/system_parameter.proto"
"google/api/usage.proto"
"google/api/visibility.proto"
"google/cloud/extended_operations.proto"
"google/cloud/location/locations.proto"
# orgpolicy/v**1** is used *indirectly* by google/cloud/asset, therefore it
# does not appear in protolists/asset.list. In addition, it is not compiled
# by any other library. So, added manually.
"google/cloud/orgpolicy/v1/orgpolicy.proto"
# Some gRPC based authentication is implemented by the IAM Credentials
# service.
"google/iam/credentials/v1/common.proto"
"google/iam/credentials/v1/iamcredentials.proto"
# We expose google::iam::v1::Policy in our google::cloud::IAMUpdater
"google/iam/v1/iam_policy.proto"
"google/iam/v1/options.proto"
"google/iam/v1/policy.proto"
"google/longrunning/operations.proto"
"google/rpc/code.proto"
"google/rpc/context/attribute_context.proto"
"google/rpc/error_details.proto"
"google/rpc/status.proto"
"google/type/calendar_period.proto"
"google/type/color.proto"
"google/type/date.proto"
"google/type/datetime.proto"
"google/type/dayofweek.proto"
"google/type/decimal.proto"
"google/type/expr.proto"
"google/type/fraction.proto"
"google/type/interval.proto"
"google/type/latlng.proto"
"google/type/localized_text.proto"
"google/type/money.proto"
"google/type/month.proto"
"google/type/phone_number.proto"
"google/type/postal_address.proto"
"google/type/quaternion.proto"
"google/type/timeofday.proto")
include(GoogleCloudCppCommonOptions)
# Set EXTERNAL_GOOGLEAPIS_SOURCE in the parent directory, as it is used by all
# the generated libraries. The Conan packages (https://conan.io), will need to
# patch this value. Setting the value in a single place makes such patching
# easier.
set(EXTERNAL_GOOGLEAPIS_PREFIX "${PROJECT_BINARY_DIR}/external/googleapis")
set(EXTERNAL_GOOGLEAPIS_SOURCE
"${EXTERNAL_GOOGLEAPIS_PREFIX}/src/googleapis_download"
PARENT_SCOPE)
set(EXTERNAL_GOOGLEAPIS_SOURCE
"${EXTERNAL_GOOGLEAPIS_PREFIX}/src/googleapis_download")
# Include the functions to compile proto files and maintain proto libraries.
include(CompileProtos)
set(EXTERNAL_GOOGLEAPIS_BYPRODUCTS)
foreach (proto ${EXTERNAL_GOOGLEAPIS_PROTO_FILES})
list(APPEND EXTERNAL_GOOGLEAPIS_BYPRODUCTS
"${EXTERNAL_GOOGLEAPIS_SOURCE}/${proto}")
endforeach ()
file(GLOB protolists "protolists/*.list")
foreach (file IN LISTS protolists)
google_cloud_cpp_load_protolist(protos "${file}")
foreach (proto IN LISTS protos)
list(APPEND EXTERNAL_GOOGLEAPIS_BYPRODUCTS "${proto}")
endforeach ()
endforeach ()
include(ExternalProject)
# -- The build needs protobuf files. The original build scripts download them from a remote server (see target 'googleapis_download').
# This is too unreliable in the context of ClickHouse ... we instead ship the downloaded archive with the ClickHouse source and
# extract it into the build directory directly.
# Dummy googleapis_download target. This needs to exist because lots of other targets depend on it
# We trick it a little, though: the target claims to generate the ${EXTERNAL_GOOGLEAPIS_BYPRODUCTS},
# while the section below is what actually provides them.
externalproject_add(
googleapis_download
EXCLUDE_FROM_ALL ON
PREFIX "${EXTERNAL_GOOGLEAPIS_PREFIX}"
PATCH_COMMAND ""
DOWNLOAD_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
BUILD_BYPRODUCTS ${EXTERNAL_GOOGLEAPIS_BYPRODUCTS}
LOG_DOWNLOAD OFF)
# Command that extracts the tarball into the proper dir
# Note: The hash must match the Google Cloud API version, otherwise funny things will happen.
# Find the right hash in "strip-prefix" in MODULE.bazel in the subrepository
message(STATUS "Extracting googleapis tarball")
set(PB_HASH "e60db19f11f94175ac682c5898cce0f77cc508ea")
set(PB_ARCHIVE "${PB_HASH}.tar.gz")
set(PB_DIR "googleapis-${PB_HASH}")
file(ARCHIVE_EXTRACT INPUT
"${ClickHouse_SOURCE_DIR}/contrib/google-cloud-cpp-cmake/googleapis/${PB_ARCHIVE}"
DESTINATION
"${EXTERNAL_GOOGLEAPIS_PREFIX}/tmp")
file(REMOVE_RECURSE "${EXTERNAL_GOOGLEAPIS_SOURCE}")
file(RENAME
"${EXTERNAL_GOOGLEAPIS_PREFIX}/tmp/${PB_DIR}"
"${EXTERNAL_GOOGLEAPIS_SOURCE}"
)
google_cloud_cpp_find_proto_include_dir(PROTO_INCLUDE_DIR)
google_cloud_cpp_add_protos_property()
function (external_googleapis_short_name var proto)
string(REPLACE "google/" "" short_name "${proto}")
string(REPLACE "/" "_" short_name "${short_name}")
string(REPLACE ".proto" "_protos" short_name "${short_name}")
set("${var}"
"${short_name}"
PARENT_SCOPE)
endfunction ()
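The three `string(REPLACE ...)` calls above are just a path-to-target-name mapping; the same transformation written out in Python for clarity (illustrative only):

def external_googleapis_short_name(proto: str) -> str:
    # "google/iam/v1/policy.proto" -> "iam_v1_policy_protos"
    short = proto.replace("google/", "")  # CMake's REPLACE drops every occurrence
    short = short.replace("/", "_")
    return short.replace(".proto", "_protos")

assert external_googleapis_short_name("google/iam/v1/policy.proto") == "iam_v1_policy_protos"
assert external_googleapis_short_name("google/api/http.proto") == "api_http_protos"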
# Create a single source proto library.
#
# * proto: the filename for the proto source.
# * (optional) ARGN: proto libraries the new library depends on.
function (external_googleapis_add_library proto)
external_googleapis_short_name(short_name "${proto}")
google_cloud_cpp_grpcpp_library(
google_cloud_cpp_${short_name} "${EXTERNAL_GOOGLEAPIS_SOURCE}/${proto}"
PROTO_PATH_DIRECTORIES "${EXTERNAL_GOOGLEAPIS_SOURCE}"
"${PROTO_INCLUDE_DIR}")
external_googleapis_set_version_and_alias("${short_name}")
set(public_deps)
foreach (dep_short_name ${ARGN})
list(APPEND public_deps "google-cloud-cpp::${dep_short_name}")
endforeach ()
list(LENGTH public_deps public_deps_length)
if (public_deps_length EQUAL 0)
target_link_libraries("google_cloud_cpp_${short_name}")
else ()
target_link_libraries("google_cloud_cpp_${short_name}"
PUBLIC ${public_deps})
endif ()
endfunction ()
function (external_googleapis_set_version_and_alias short_name)
add_dependencies("google_cloud_cpp_${short_name}" googleapis_download)
set_target_properties(
"google_cloud_cpp_${short_name}"
PROPERTIES EXPORT_NAME google-cloud-cpp::${short_name}
VERSION "${PROJECT_VERSION}"
SOVERSION ${PROJECT_VERSION_MAJOR})
add_library("google-cloud-cpp::${short_name}" ALIAS
"google_cloud_cpp_${short_name}")
endfunction ()
if (GOOGLE_CLOUD_CPP_USE_INSTALLED_COMMON)
return()
endif ()
# Avoid adding new proto libraries to this list as these libraries are always
# installed, regardless of whether or not they are needed. See #8022 for more
# details.
set(external_googleapis_installed_libraries_list
# cmake-format: sort
google_cloud_cpp_cloud_common_common_protos
google_cloud_cpp_iam_credentials_v1_common_protos
google_cloud_cpp_iam_credentials_v1_iamcredentials_protos
google_cloud_cpp_iam_v1_iam_policy_protos
google_cloud_cpp_iam_v1_options_protos
google_cloud_cpp_iam_v1_policy_protos
google_cloud_cpp_longrunning_operations_protos)
# These proto files cannot be added in the foreach() loop because they have
# dependencies.
set(PROTO_FILES_WITH_DEPENDENCIES
# cmake-format: sort
"google/api/annotations.proto"
"google/api/auth.proto"
"google/api/billing.proto"
"google/api/client.proto"
"google/api/control.proto"
"google/api/distribution.proto"
"google/api/endpoint.proto"
"google/api/log.proto"
"google/api/logging.proto"
"google/api/metric.proto"
"google/api/monitored_resource.proto"
"google/api/monitoring.proto"
"google/api/quota.proto"
"google/api/service.proto"
"google/api/usage.proto"
"google/cloud/location/locations.proto"
"google/rpc/status.proto")
# For some directories *most* (but not all) the proto files are simple enough
# that the libraries can be generated with a foreach() loop.
foreach (proto IN LISTS EXTERNAL_GOOGLEAPIS_PROTO_FILES)
if (proto MATCHES "^google/api/"
OR proto MATCHES "^google/type"
OR proto MATCHES "^google/rpc/"
OR proto MATCHES "^google/cloud/")
external_googleapis_short_name(short_name "${proto}")
list(APPEND external_googleapis_installed_libraries_list
google_cloud_cpp_${short_name})
list(FIND PROTO_FILES_WITH_DEPENDENCIES "${proto}" has_dependency)
if (has_dependency EQUAL -1)
external_googleapis_add_library("${proto}")
endif ()
endif ()
endforeach ()
# Out of order because they have dependencies.
external_googleapis_add_library("google/api/annotations.proto" api_http_protos)
external_googleapis_add_library("google/api/auth.proto" api_annotations_protos)
external_googleapis_add_library("google/api/client.proto"
api_launch_stage_protos)
external_googleapis_add_library("google/api/control.proto" api_policy_protos)
external_googleapis_add_library("google/api/metric.proto"
api_launch_stage_protos api_label_protos)
external_googleapis_add_library("google/api/billing.proto"
api_annotations_protos api_metric_protos)
external_googleapis_add_library("google/api/distribution.proto"
api_annotations_protos)
external_googleapis_add_library("google/api/endpoint.proto"
api_annotations_protos)
external_googleapis_add_library("google/api/log.proto" api_label_protos)
external_googleapis_add_library("google/api/logging.proto"
api_annotations_protos api_label_protos)
external_googleapis_add_library("google/api/monitored_resource.proto"
api_launch_stage_protos api_label_protos)
external_googleapis_add_library("google/api/monitoring.proto"
api_annotations_protos)
external_googleapis_add_library("google/api/quota.proto" api_annotations_protos)
external_googleapis_add_library("google/api/usage.proto" api_annotations_protos
api_visibility_protos)
external_googleapis_add_library(
"google/api/service.proto"
api_annotations_protos
api_auth_protos
api_backend_protos
api_billing_protos
api_client_protos
api_context_protos
api_control_protos
api_documentation_protos
api_endpoint_protos
api_http_protos
api_label_protos
api_log_protos
api_logging_protos
api_metric_protos
api_monitored_resource_protos
api_monitoring_protos
api_quota_protos
api_resource_protos
api_source_info_protos
api_system_parameter_protos
api_usage_protos)
external_googleapis_add_library("google/cloud/location/locations.proto"
api_annotations_protos api_client_protos)
external_googleapis_add_library("google/iam/v1/options.proto"
api_annotations_protos)
external_googleapis_add_library("google/iam/v1/policy.proto"
api_annotations_protos type_expr_protos)
external_googleapis_add_library("google/rpc/status.proto"
rpc_error_details_protos)
external_googleapis_add_library(
"google/longrunning/operations.proto" api_annotations_protos
api_client_protos rpc_status_protos)
external_googleapis_add_library(
"google/iam/v1/iam_policy.proto"
api_annotations_protos
api_client_protos
api_field_behavior_protos
api_resource_protos
iam_v1_options_protos
iam_v1_policy_protos)
external_googleapis_add_library("google/iam/credentials/v1/common.proto"
api_field_behavior_protos api_resource_protos)
external_googleapis_add_library(
"google/iam/credentials/v1/iamcredentials.proto" api_annotations_protos
api_client_protos iam_credentials_v1_common_protos)
google_cloud_cpp_load_protolist(cloud_common_list "${GOOGLE_CLOUD_CPP_DIR}/external/googleapis/protolists/common.list")
google_cloud_cpp_load_protodeps(cloud_common_deps "${GOOGLE_CLOUD_CPP_DIR}/external/googleapis/protodeps/common.deps")
google_cloud_cpp_grpcpp_library(
google_cloud_cpp_cloud_common_common_protos ${cloud_common_list}
PROTO_PATH_DIRECTORIES "${EXTERNAL_GOOGLEAPIS_SOURCE}"
"${PROTO_INCLUDE_DIR}")
external_googleapis_set_version_and_alias(cloud_common_common_protos)
target_link_libraries(google_cloud_cpp_cloud_common_common_protos
PUBLIC ${cloud_common_deps})
# Install the libraries and headers in the locations determined by
# GNUInstallDirs
include(GNUInstallDirs)
install(
TARGETS ${external_googleapis_installed_libraries_list}
EXPORT googleapis-targets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
COMPONENT google_cloud_cpp_runtime
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
COMPONENT google_cloud_cpp_runtime
NAMELINK_COMPONENT google_cloud_cpp_development
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
COMPONENT google_cloud_cpp_development)
foreach (target ${external_googleapis_installed_libraries_list})
google_cloud_cpp_install_proto_library_headers("${target}")
google_cloud_cpp_install_proto_library_protos(
"${target}" "${EXTERNAL_GOOGLEAPIS_SOURCE}")
endforeach ()
# Create and install the pkg-config files.
foreach (target ${external_googleapis_installed_libraries_list})
external_googleapis_install_pc("${target}")
endforeach ()
# Create and install the googleapis pkg-config file for backwards compatibility.
set(GOOGLE_CLOUD_CPP_PC_LIBS "")
google_cloud_cpp_set_pkgconfig_paths()
set(GOOGLE_CLOUD_CPP_PC_NAME "The Google APIS C++ Proto Library")
set(GOOGLE_CLOUD_CPP_PC_DESCRIPTION
"Provides C++ APIs to access Google Cloud Platforms.")
# This list is for backwards compatibility purposes only. DO NOT add new
# libraries to it.
string(
JOIN
" "
GOOGLE_CLOUD_CPP_PC_REQUIRES
"google_cloud_cpp_bigtable_protos"
"google_cloud_cpp_cloud_bigquery_protos"
"google_cloud_cpp_iam_protos"
"google_cloud_cpp_pubsub_protos"
"google_cloud_cpp_storage_protos"
"google_cloud_cpp_logging_protos"
"google_cloud_cpp_iam_v1_iam_policy_protos"
"google_cloud_cpp_iam_v1_options_protos"
"google_cloud_cpp_iam_v1_policy_protos"
"google_cloud_cpp_longrunning_operations_protos"
"google_cloud_cpp_api_auth_protos"
"google_cloud_cpp_api_annotations_protos"
"google_cloud_cpp_api_client_protos"
"google_cloud_cpp_api_field_behavior_protos"
"google_cloud_cpp_api_http_protos"
"google_cloud_cpp_rpc_status_protos"
"google_cloud_cpp_rpc_error_details_protos"
"google_cloud_cpp_type_expr_protos"
"grpc++"
"grpc"
"openssl"
"protobuf"
"zlib"
"libcares")
set(GOOGLE_CLOUD_CPP_PC_LIBS "")
google_cloud_cpp_set_pkgconfig_paths()
configure_file("${PROJECT_SOURCE_DIR}/cmake/templates/config.pc.in"
"googleapis.pc" @ONLY)
install(
FILES "${CMAKE_CURRENT_BINARY_DIR}/googleapis.pc"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
COMPONENT google_cloud_cpp_development)
# Create and install the CMake configuration files.
# include(CMakePackageConfigHelpers)
# configure_file("${CMAKE_CURRENT_LIST_DIR}/config.cmake.in"
# "google_cloud_cpp_googleapis-config.cmake" @ONLY)
# write_basic_package_version_file(
# "google_cloud_cpp_googleapis-config-version.cmake"
# VERSION ${PROJECT_VERSION}
# COMPATIBILITY ExactVersion)
# Export the CMake targets to make it easy to create configuration files.
# install(
# EXPORT googleapis-targets
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_googleapis"
# COMPONENT google_cloud_cpp_development)
# install(
# FILES
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_googleapis-config.cmake"
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_googleapis-config-version.cmake"
# "${PROJECT_SOURCE_DIR}/cmake/FindgRPC.cmake"
# "${PROJECT_SOURCE_DIR}/cmake/CompileProtos.cmake"
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_googleapis"
# COMPONENT google_cloud_cpp_development)

View File

@ -0,0 +1,447 @@
# ~~~
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ~~~
# File copied from google-cloud-cpp/google-cloud-cpp/google_cloud_cpp_common.cmake with minor modifications.
set(GOOGLE_CLOUD_CPP_COMMON_DIR "${GOOGLE_CLOUD_CPP_DIR}/google/cloud")
# Generate the version information from the CMake values.
# configure_file(${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/version_info.h.in
# ${CMAKE_CURRENT_SOURCE_DIR}/internal/version_info.h)
# Create the file that captures build information. Having access to the compiler
# and build flags at runtime allows us to print better benchmark results.
string(TOUPPER "${CMAKE_BUILD_TYPE}" GOOGLE_CLOUD_CPP_BUILD_TYPE_UPPER)
configure_file(${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/build_info.cc.in internal/build_info.cc)
# the client library
add_library(
google_cloud_cpp_common # cmake-format: sort
${CMAKE_CURRENT_BINARY_DIR}/internal/build_info.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/access_token.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/access_token.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/backoff_policy.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/common_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/credentials.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/credentials.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/experimental_tag.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/future.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/future_generic.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/future_void.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/idempotency.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/absl_str_cat_quiet.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/absl_str_join_quiet.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/absl_str_replace_quiet.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/algorithm.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/api_client_header.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/api_client_header.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/attributes.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/auth_header_error.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/auth_header_error.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/backoff_policy.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/backoff_policy.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/base64_transforms.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/base64_transforms.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/big_endian.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/build_info.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/call_context.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/clock.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/compiler_info.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/compiler_info.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/compute_engine_util.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/compute_engine_util.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/credentials_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/credentials_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_future_status.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_future_status.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/detect_gcp.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/detect_gcp_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/detect_gcp_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/diagnostics_pop.inc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/diagnostics_push.inc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/disable_deprecation_warnings.inc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/disable_msvc_crt_secure_warnings.inc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/error_context.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/error_context.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/filesystem.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/filesystem.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/format_time_point.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/format_time_point.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_base.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_coroutines.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_fwd.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_then_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/future_then_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/getenv.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/getenv.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/group_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/invocation_id_generator.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/invocation_id_generator.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/invoke_result.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/ios_flags_saver.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/log_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/log_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/make_status.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/make_status.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/noexcept_action.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/noexcept_action.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/non_constructible.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/opentelemetry.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/opentelemetry.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/opentelemetry_context.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/opentelemetry_context.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/pagination_range.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/parse_rfc3339.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/parse_rfc3339.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/populate_common_options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/populate_common_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/port_platform.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/random.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/random.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_info.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_loop_helpers.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_loop_helpers.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_policy_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_policy_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/service_endpoint.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/service_endpoint.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/sha256_hash.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/sha256_hash.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/sha256_hmac.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/sha256_hmac.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/sha256_type.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/status_payload_keys.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/status_payload_keys.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/status_utils.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/status_utils.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/strerror.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/strerror.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/subject_token.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/subject_token.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/throw_delegate.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/throw_delegate.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/timer_queue.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/timer_queue.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/trace_propagator.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/trace_propagator.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/traced_stream_range.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/tuple.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/type_list.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/type_traits.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/url_encode.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/url_encode.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/user_agent_prefix.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/user_agent_prefix.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/utility.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/version_info.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/kms_key_name.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/kms_key_name.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/location.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/location.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/log.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/log.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/no_await_tag.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/opentelemetry_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/optional.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/polling_policy.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/project.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/project.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/retry_policy.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/rpc_metadata.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/status.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/status.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/status_or.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/stream_range.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/terminate_handler.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/terminate_handler.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/tracing_options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/tracing_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/universe_domain_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/version.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/version.h)
target_link_libraries(
google_cloud_cpp_common
PUBLIC absl::base
absl::memory
absl::optional
absl::span
absl::str_format
absl::time
absl::variant
Threads::Threads)
if (WIN32)
target_compile_definitions(google_cloud_cpp_common
PRIVATE WIN32_LEAN_AND_MEAN)
target_link_libraries(google_cloud_cpp_common PUBLIC bcrypt)
else ()
target_link_libraries(google_cloud_cpp_common PUBLIC OpenSSL::Crypto ch_contrib::re2)
endif ()
google_cloud_cpp_add_common_options(google_cloud_cpp_common)
target_include_directories(
google_cloud_cpp_common PUBLIC $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>
$<INSTALL_INTERFACE:include>)
# We're putting generated code into ${PROJECT_BINARY_DIR} (e.g. compiled
# protobufs or build info), so we need it on the include path, however we don't
# want it checked by linters so we mark it as SYSTEM.
target_include_directories(google_cloud_cpp_common SYSTEM
PUBLIC $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>)
target_compile_options(google_cloud_cpp_common
PUBLIC ${GOOGLE_CLOUD_CPP_EXCEPTIONS_FLAG})
set_target_properties(
google_cloud_cpp_common
PROPERTIES EXPORT_NAME "google-cloud-cpp::common"
VERSION ${PROJECT_VERSION}
SOVERSION ${PROJECT_VERSION_MAJOR})
add_library(google-cloud-cpp::common ALIAS google_cloud_cpp_common)
#create_bazel_config(google_cloud_cpp_common YEAR 2018)
# # Export the CMake targets to make it easy to create configuration files.
# install(
# EXPORT google_cloud_cpp_common-targets
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_common"
# COMPONENT google_cloud_cpp_development)
# # Install the libraries and headers in the locations determined by
# # GNUInstallDirs
# install(
# TARGETS google_cloud_cpp_common
# EXPORT google_cloud_cpp_common-targets
# RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
# COMPONENT google_cloud_cpp_runtime
# LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
# COMPONENT google_cloud_cpp_runtime
# NAMELINK_COMPONENT google_cloud_cpp_development
# ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
# COMPONENT google_cloud_cpp_development)
#google_cloud_cpp_install_headers(google_cloud_cpp_common include/google/cloud)
# google_cloud_cpp_add_pkgconfig(
# "common"
# "Google Cloud C++ Client Library Common Components"
# "Common Components used by the Google Cloud C++ Client Libraries."
# "absl_optional"
# "absl_span"
# "absl_strings"
# "absl_time"
# "absl_time_zone"
# "absl_variant"
# "${GOOGLE_CLOUD_CPP_OPENTELEMETRY_API}"
# NON_WIN32_REQUIRES
# openssl
# WIN32_LIBS
# bcrypt)
# Create and install the CMake configuration files.
# configure_file("config.cmake.in" "google_cloud_cpp_common-config.cmake" @ONLY)
# write_basic_package_version_file(
# "google_cloud_cpp_common-config-version.cmake"
# VERSION ${PROJECT_VERSION}
# COMPATIBILITY ExactVersion)
# install(
# FILES
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_common-config.cmake"
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_common-config-version.cmake"
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_common"
# COMPONENT google_cloud_cpp_development)
# if (GOOGLE_CLOUD_CPP_WITH_MOCKS)
# # Create a header-only library for the mocks. We use a CMake `INTERFACE`
# # library for these, a regular library would not work on macOS (where the
# # library needs at least one .o file).
# add_library(google_cloud_cpp_mocks INTERFACE)
# set(google_cloud_cpp_mocks_hdrs
# # cmake-format: sort
# mocks/current_options.h mocks/mock_async_streaming_read_write_rpc.h
# mocks/mock_stream_range.h)
# export_list_to_bazel("google_cloud_cpp_mocks.bzl"
# "google_cloud_cpp_mocks_hdrs" YEAR "2022")
# target_link_libraries(
# google_cloud_cpp_mocks INTERFACE google-cloud-cpp::common GTest::gmock
# GTest::gtest)
# set_target_properties(google_cloud_cpp_mocks
# PROPERTIES EXPORT_NAME google-cloud-cpp::mocks)
# target_include_directories(
# google_cloud_cpp_mocks
# INTERFACE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>
# $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>
# $<INSTALL_INTERFACE:include>)
# target_compile_options(google_cloud_cpp_mocks
# INTERFACE ${GOOGLE_CLOUD_CPP_EXCEPTIONS_FLAG})
# add_library(google-cloud-cpp::mocks ALIAS google_cloud_cpp_mocks)
# install(
# FILES ${google_cloud_cpp_mocks_hdrs}
# DESTINATION "include/google/cloud/mocks"
# COMPONENT google_cloud_cpp_development)
# # Export the CMake targets to make it easy to create configuration files.
# install(
# EXPORT google_cloud_cpp_mocks-targets
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_mocks"
# COMPONENT google_cloud_cpp_development)
# install(
# TARGETS google_cloud_cpp_mocks
# EXPORT google_cloud_cpp_mocks-targets
# COMPONENT google_cloud_cpp_development)
# google_cloud_cpp_add_pkgconfig(
# "mocks" "Google Cloud C++ Testing Library"
# "Helpers for testing the Google Cloud C++ Client Libraries"
# "google_cloud_cpp_common" "gmock")
# # Create and install the CMake configuration files.
# configure_file("mocks-config.cmake.in"
# "google_cloud_cpp_mocks-config.cmake" @ONLY)
# write_basic_package_version_file(
# "google_cloud_cpp_mocks-config-version.cmake"
# VERSION ${PROJECT_VERSION}
# COMPATIBILITY ExactVersion)
# install(
# FILES
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_mocks-config.cmake"
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_mocks-config-version.cmake"
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_mocks"
# COMPONENT google_cloud_cpp_development)
# endif ()
# if (BUILD_TESTING)
# include(FindBenchmarkWithWorkarounds)
# set(google_cloud_cpp_common_unit_tests
# # cmake-format: sort
# access_token_test.cc
# common_options_test.cc
# future_coroutines_test.cc
# future_generic_test.cc
# future_generic_then_test.cc
# future_void_test.cc
# future_void_then_test.cc
# internal/algorithm_test.cc
# internal/api_client_header_test.cc
# internal/backoff_policy_test.cc
# internal/base64_transforms_test.cc
# internal/big_endian_test.cc
# internal/call_context_test.cc
# internal/clock_test.cc
# internal/compiler_info_test.cc
# internal/compute_engine_util_test.cc
# internal/credentials_impl_test.cc
# internal/debug_future_status_test.cc
# internal/debug_string_test.cc
# internal/detect_gcp_test.cc
# internal/error_context_test.cc
# internal/filesystem_test.cc
# internal/format_time_point_test.cc
# internal/future_impl_test.cc
# internal/future_then_impl_test.cc
# internal/group_options_test.cc
# internal/invocation_id_generator_test.cc
# internal/invoke_result_test.cc
# internal/log_impl_test.cc
# internal/make_status_test.cc
# internal/noexcept_action_test.cc
# internal/opentelemetry_context_test.cc
# internal/opentelemetry_test.cc
# internal/pagination_range_test.cc
# internal/parse_rfc3339_test.cc
# internal/populate_common_options_test.cc
# internal/random_test.cc
# internal/retry_loop_helpers_test.cc
# internal/retry_policy_impl_test.cc
# internal/service_endpoint_test.cc
# internal/sha256_hash_test.cc
# internal/sha256_hmac_test.cc
# internal/status_payload_keys_test.cc
# internal/status_utils_test.cc
# internal/strerror_test.cc
# internal/subject_token_test.cc
# internal/throw_delegate_test.cc
# internal/timer_queue_test.cc
# internal/trace_propagator_test.cc
# internal/traced_stream_range_test.cc
# internal/tuple_test.cc
# internal/type_list_test.cc
# internal/url_encode_test.cc
# internal/user_agent_prefix_test.cc
# internal/utility_test.cc
# kms_key_name_test.cc
# location_test.cc
# log_test.cc
# mocks/current_options_test.cc
# mocks/mock_stream_range_test.cc
# options_test.cc
# polling_policy_test.cc
# project_test.cc
# status_or_test.cc
# status_test.cc
# stream_range_test.cc
# terminate_handler_test.cc
# tracing_options_test.cc)
# # Export the list of unit tests so the Bazel BUILD file can pick it up.
# export_list_to_bazel("google_cloud_cpp_common_unit_tests.bzl"
# "google_cloud_cpp_common_unit_tests" YEAR "2018")
# foreach (fname ${google_cloud_cpp_common_unit_tests})
# google_cloud_cpp_add_executable(target "common" "${fname}")
# target_link_libraries(
# ${target}
# PRIVATE google_cloud_cpp_testing
# google-cloud-cpp::common
# google-cloud-cpp::mocks
# absl::variant
# GTest::gmock_main
# GTest::gmock
# GTest::gtest)
# google_cloud_cpp_add_common_options(${target})
# add_test(NAME ${target} COMMAND ${target})
# endforeach ()
# set(google_cloud_cpp_common_benchmarks # cmake-format: sort
# options_benchmark.cc)
# # Export the list of benchmarks to a .bzl file so we do not need to maintain
# # the list in two places.
# export_list_to_bazel("google_cloud_cpp_common_benchmarks.bzl"
# "google_cloud_cpp_common_benchmarks" YEAR "2020")
# # Generate a target for each benchmark.
# foreach (fname ${google_cloud_cpp_common_benchmarks})
# google_cloud_cpp_add_executable(target "common" "${fname}")
# add_test(NAME ${target} COMMAND ${target})
# target_link_libraries(${target} PRIVATE google-cloud-cpp::common
# benchmark::benchmark_main)
# google_cloud_cpp_add_common_options(${target})
# endforeach ()
# endif ()
# if (BUILD_TESTING AND GOOGLE_CLOUD_CPP_ENABLE_CXX_EXCEPTIONS)
# google_cloud_cpp_add_samples_relative("common" "samples/")
# endif ()

View File

@ -0,0 +1,350 @@
# ~~~
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ~~~
# File copied from google-cloud-cpp/google/cloud/google_cloud_cpp_grpc_utils.cmake with minor modifications.
set(GOOGLE_CLOUD_CPP_COMMON_DIR "${GOOGLE_CLOUD_CPP_DIR}/google/cloud")
# the library
add_library(
google_cloud_cpp_grpc_utils # cmake-format: sort
${GOOGLE_CLOUD_CPP_COMMON_DIR}/async_operation.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/async_streaming_read_write_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/background_threads.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/completion_queue.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/completion_queue.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/connection_options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/connection_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_error_delegate.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_error_delegate.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_utils/async_operation.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_utils/completion_queue.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_utils/grpc_error_delegate.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/grpc_utils/version.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/iam_updater.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_connection_ready.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_connection_ready.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_long_running_operation.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_polling_loop.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_polling_loop.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_stream_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_write_stream_auth.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_write_stream_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_write_stream_logging.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_write_stream_timeout.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_read_write_stream_tracing.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_retry_loop.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_retry_unary_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_rpc_details.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc_auth.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc_logging.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc_timeout.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_read_rpc_tracing.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc_auth.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc_logging.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc_timeout.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/async_streaming_write_rpc_tracing.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/background_threads_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/background_threads_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/completion_queue_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string_protobuf.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string_protobuf.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string_status.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/debug_string_status.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/default_completion_queue_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/default_completion_queue_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/extract_long_running_result.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/extract_long_running_result.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_access_token_authentication.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_access_token_authentication.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_api_key_authentication.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_api_key_authentication.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_async_access_token_cache.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_async_access_token_cache.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_channel_credentials_authentication.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_channel_credentials_authentication.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_impersonate_service_account.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_impersonate_service_account.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_metadata_view.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_opentelemetry.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_opentelemetry.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_request_metadata.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_request_metadata.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_service_account_authentication.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/grpc_service_account_authentication.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/log_wrapper.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/log_wrapper.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/minimal_iam_credentials_stub.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/minimal_iam_credentials_stub.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/populate_grpc_options.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/populate_grpc_options.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/resumable_streaming_read_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/retry_loop.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/routing_matcher.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/setup_context.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_read_rpc.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_read_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_read_rpc_logging.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_read_rpc_tracing.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_write_rpc.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_write_rpc_impl.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_write_rpc_impl.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_write_rpc_logging.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/streaming_write_rpc_tracing.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/time_utils.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/time_utils.h
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/unified_grpc_credentials.cc
${GOOGLE_CLOUD_CPP_COMMON_DIR}/internal/unified_grpc_credentials.h)
target_link_libraries(
google_cloud_cpp_grpc_utils
PUBLIC absl::function_ref
absl::memory
absl::time
absl::variant
google-cloud-cpp::iam_credentials_v1_iamcredentials_protos
google-cloud-cpp::iam_v1_policy_protos
google-cloud-cpp::longrunning_operations_protos
google-cloud-cpp::iam_v1_iam_policy_protos
google-cloud-cpp::rpc_error_details_protos
google-cloud-cpp::rpc_status_protos
google-cloud-cpp::common
gRPC::grpc++
gRPC::grpc)
google_cloud_cpp_add_common_options(google_cloud_cpp_grpc_utils)
target_include_directories(
google_cloud_cpp_grpc_utils PUBLIC $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>
$<INSTALL_INTERFACE:include>)
target_compile_options(google_cloud_cpp_grpc_utils
PUBLIC ${GOOGLE_CLOUD_CPP_EXCEPTIONS_FLAG})
set_target_properties(
google_cloud_cpp_grpc_utils
PROPERTIES EXPORT_NAME "google-cloud-cpp::grpc_utils"
VERSION ${PROJECT_VERSION}
SOVERSION ${PROJECT_VERSION_MAJOR})
add_library(google-cloud-cpp::grpc_utils ALIAS google_cloud_cpp_grpc_utils)
#create_bazel_config(google_cloud_cpp_grpc_utils YEAR 2019)
# # Install the libraries and headers in the locations determined by
# # GNUInstallDirs
# install(
# TARGETS
# EXPORT grpc_utils-targets
# RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
# LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
# ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
# COMPONENT google_cloud_cpp_development)
# # Export the CMake targets to make it easy to create configuration files.
# install(
# EXPORT grpc_utils-targets
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_grpc_utils"
# COMPONENT google_cloud_cpp_development)
# install(
# TARGETS google_cloud_cpp_grpc_utils
# EXPORT grpc_utils-targets
# RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
# COMPONENT google_cloud_cpp_runtime
# LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
# COMPONENT google_cloud_cpp_runtime
# NAMELINK_COMPONENT google_cloud_cpp_development
# ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
# COMPONENT google_cloud_cpp_development)
# google_cloud_cpp_install_headers(google_cloud_cpp_grpc_utils
# include/google/cloud)
# google_cloud_cpp_add_pkgconfig(
# grpc_utils
# "gRPC Utilities for the Google Cloud C++ Client Library"
# "Provides gRPC Utilities for the Google Cloud C++ Client Library."
# "google_cloud_cpp_common"
# "google_cloud_cpp_iam_credentials_v1_iamcredentials_protos"
# "google_cloud_cpp_iam_v1_policy_protos"
# "google_cloud_cpp_iam_v1_iam_policy_protos"
# "google_cloud_cpp_longrunning_operations_protos"
# "google_cloud_cpp_rpc_status_protos"
# "absl_function_ref"
# "absl_strings"
# "absl_time"
# "absl_time_zone"
# "absl_variant"
# "openssl")
# # Create and install the CMake configuration files.
# configure_file("grpc_utils/config.cmake.in"
# "google_cloud_cpp_grpc_utils-config.cmake" @ONLY)
# write_basic_package_version_file(
# "google_cloud_cpp_grpc_utils-config-version.cmake"
# VERSION ${PROJECT_VERSION}
# COMPATIBILITY ExactVersion)
# install(
# FILES
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_grpc_utils-config.cmake"
# "${CMAKE_CURRENT_BINARY_DIR}/google_cloud_cpp_grpc_utils-config-version.cmake"
# DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/google_cloud_cpp_grpc_utils"
# COMPONENT google_cloud_cpp_development)
# function (google_cloud_cpp_grpc_utils_add_test fname labels)
# google_cloud_cpp_add_executable(target "common" "${fname}")
# target_link_libraries(
# ${target}
# PRIVATE google-cloud-cpp::grpc_utils
# google_cloud_cpp_testing_grpc
# google_cloud_cpp_testing
# google-cloud-cpp::common
# absl::variant
# GTest::gmock_main
# GTest::gmock
# GTest::gtest
# gRPC::grpc++
# gRPC::grpc)
# google_cloud_cpp_add_common_options(${target})
# add_test(NAME ${target} COMMAND ${target})
# set_tests_properties(${target} PROPERTIES LABELS "${labels}")
# endfunction ()
# if (BUILD_TESTING)
# include(FindBenchmarkWithWorkarounds)
# # List the unit tests, then setup the targets and dependencies.
# set(google_cloud_cpp_grpc_utils_unit_tests
# # cmake-format: sort
# completion_queue_test.cc
# connection_options_test.cc
# grpc_error_delegate_test.cc
# grpc_options_test.cc
# internal/async_connection_ready_test.cc
# internal/async_long_running_operation_test.cc
# internal/async_polling_loop_test.cc
# internal/async_read_write_stream_auth_test.cc
# internal/async_read_write_stream_impl_test.cc
# internal/async_read_write_stream_logging_test.cc
# internal/async_read_write_stream_timeout_test.cc
# internal/async_read_write_stream_tracing_test.cc
# internal/async_retry_loop_test.cc
# internal/async_retry_unary_rpc_test.cc
# internal/async_streaming_read_rpc_auth_test.cc
# internal/async_streaming_read_rpc_impl_test.cc
# internal/async_streaming_read_rpc_logging_test.cc
# internal/async_streaming_read_rpc_timeout_test.cc
# internal/async_streaming_read_rpc_tracing_test.cc
# internal/async_streaming_write_rpc_auth_test.cc
# internal/async_streaming_write_rpc_impl_test.cc
# internal/async_streaming_write_rpc_logging_test.cc
# internal/async_streaming_write_rpc_timeout_test.cc
# internal/async_streaming_write_rpc_tracing_test.cc
# internal/background_threads_impl_test.cc
# internal/debug_string_protobuf_test.cc
# internal/debug_string_status_test.cc
# internal/extract_long_running_result_test.cc
# internal/grpc_access_token_authentication_test.cc
# internal/grpc_async_access_token_cache_test.cc
# internal/grpc_channel_credentials_authentication_test.cc
# internal/grpc_opentelemetry_test.cc
# internal/grpc_request_metadata_test.cc
# internal/grpc_service_account_authentication_test.cc
# internal/log_wrapper_test.cc
# internal/minimal_iam_credentials_stub_test.cc
# internal/populate_grpc_options_test.cc
# internal/resumable_streaming_read_rpc_test.cc
# internal/retry_loop_test.cc
# internal/routing_matcher_test.cc
# internal/streaming_read_rpc_logging_test.cc
# internal/streaming_read_rpc_test.cc
# internal/streaming_read_rpc_tracing_test.cc
# internal/streaming_write_rpc_logging_test.cc
# internal/streaming_write_rpc_test.cc
# internal/streaming_write_rpc_tracing_test.cc
# internal/time_utils_test.cc
# internal/unified_grpc_credentials_test.cc)
# # List the unit tests, then setup the targets and dependencies.
# set(google_cloud_cpp_grpc_utils_integration_tests
# # cmake-format: sort
# internal/grpc_impersonate_service_account_integration_test.cc)
# # Export the list of unit and integration tests so the Bazel BUILD file can
# # pick them up.
# export_list_to_bazel("google_cloud_cpp_grpc_utils_unit_tests.bzl"
# "google_cloud_cpp_grpc_utils_unit_tests" YEAR "2019")
# export_list_to_bazel(
# "google_cloud_cpp_grpc_utils_integration_tests.bzl"
# "google_cloud_cpp_grpc_utils_integration_tests" YEAR "2021")
# foreach (fname ${google_cloud_cpp_grpc_utils_unit_tests})
# google_cloud_cpp_grpc_utils_add_test("${fname}" "")
# endforeach ()
# # TODO(#12485) - remove dependency on bigtable in this integration test.
# if (NOT bigtable IN_LIST GOOGLE_CLOUD_CPP_ENABLE)
# list(REMOVE_ITEM google_cloud_cpp_grpc_utils_integration_tests
# "internal/grpc_impersonate_service_account_integration_test.cc")
# endif ()
# foreach (fname ${google_cloud_cpp_grpc_utils_integration_tests})
# google_cloud_cpp_add_executable(target "common" "${fname}")
# target_link_libraries(
# ${target}
# PRIVATE google-cloud-cpp::grpc_utils
# google_cloud_cpp_testing_grpc
# google_cloud_cpp_testing
# google-cloud-cpp::common
# google-cloud-cpp::iam_credentials_v1_iamcredentials_protos
# absl::variant
# GTest::gmock_main
# GTest::gmock
# GTest::gtest
# gRPC::grpc++
# gRPC::grpc)
# google_cloud_cpp_add_common_options(${target})
# add_test(NAME ${target} COMMAND ${target})
# set_tests_properties(${target} PROPERTIES LABELS
# "integration-test-production")
# # TODO(12485) - remove dep on bigtable_protos
# if (bigtable IN_LIST GOOGLE_CLOUD_CPP_ENABLE)
# target_link_libraries(${target}
# PRIVATE google-cloud-cpp::bigtable_protos)
# endif ()
# endforeach ()
# set(google_cloud_cpp_grpc_utils_benchmarks # cmake-format: sortable
# completion_queue_benchmark.cc)
# # Export the list of benchmarks to a .bzl file so we do not need to maintain
# # the list in two places.
# export_list_to_bazel("google_cloud_cpp_grpc_utils_benchmarks.bzl"
# "google_cloud_cpp_grpc_utils_benchmarks" YEAR "2020")
# # Generate a target for each benchmark.
# foreach (fname ${google_cloud_cpp_grpc_utils_benchmarks})
# google_cloud_cpp_add_executable(target "common" "${fname}")
# add_test(NAME ${target} COMMAND ${target})
# target_link_libraries(
# ${target}
# PRIVATE google-cloud-cpp::grpc_utils google-cloud-cpp::common
# benchmark::benchmark_main)
# google_cloud_cpp_add_common_options(${target})
# endforeach ()
# endif ()

contrib/jwt-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit a6927cb8140858c34e05d1a954626b9849fbcdfc

View File

@ -0,0 +1,23 @@
set(ENABLE_JWT_CPP_DEFAULT OFF)
if (ENABLE_LIBRARIES AND CLICKHOUSE_CLOUD)
    set(ENABLE_JWT_CPP_DEFAULT ON)
endif()

option(ENABLE_JWT_CPP "Enable jwt-cpp library" ${ENABLE_JWT_CPP_DEFAULT})

if (NOT ENABLE_JWT_CPP)
    message(STATUS "Not using jwt-cpp")
    return()
endif()

if (NOT TARGET OpenSSL::Crypto)
    message(${RECONFIGURE_MESSAGE_LEVEL} "Can't use jwt-cpp without OpenSSL")
endif()
set (JWT_CPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/jwt-cpp/include")
add_library (_jwt-cpp INTERFACE)
target_include_directories(_jwt-cpp SYSTEM BEFORE INTERFACE ${JWT_CPP_INCLUDE_DIR})
add_library(ch_contrib::jwt-cpp ALIAS _jwt-cpp)
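# A hypothetical consumer (the target name below is illustrative, not from this commit):
# since _jwt-cpp is a header-only INTERFACE library, linking it merely propagates
# the include path to the consumer:
#   target_link_libraries(some_consumer_target PRIVATE ch_contrib::jwt-cpp)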

View File

@ -17,3 +17,4 @@ git config submodule."contrib/protobuf".update '!../sparse-checkout/update-proto
git config submodule."contrib/postgres".update '!../sparse-checkout/update-postgres.sh'
git config submodule."contrib/libxml2".update '!../sparse-checkout/update-libxml2.sh'
git config submodule."contrib/brotli".update '!../sparse-checkout/update-brotli.sh'
git config submodule."contrib/google-cloud-cpp".update '!../sparse-checkout/update-google-cloud-cpp.sh'

View File

@ -7,6 +7,7 @@ echo '/*' > $FILES_TO_CHECKOUT
echo '!/*/*' >> $FILES_TO_CHECKOUT
echo '/src/aws-cpp-sdk-core/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-s3/*' >> $FILES_TO_CHECKOUT
echo '/generated/src/aws-cpp-sdk-aws/*' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1

View File

@ -0,0 +1,18 @@
#!/bin/sh
echo "Using sparse checkout for google-cloud-cpp"
FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
echo '!/*' > $FILES_TO_CHECKOUT
echo '/google/cloud/*.cc' >> $FILES_TO_CHECKOUT
echo '/google/cloud/*.h' >> $FILES_TO_CHECKOUT
echo '/google/cloud/internal/*' >> $FILES_TO_CHECKOUT
echo '/google/cloud/grpc_utils/*' >> $FILES_TO_CHECKOUT
echo '/google/cloud/kms/*' >> $FILES_TO_CHECKOUT
echo '/cmake/*' >> $FILES_TO_CHECKOUT
echo '/protos/*' >> $FILES_TO_CHECKOUT
echo '/external/googleapis' >> $FILES_TO_CHECKOUT
git config core.sparsecheckout true
git checkout $1
git read-tree -mu HEAD

View File

@ -24,7 +24,7 @@ git config --file .gitmodules --get-regexp '.*path' | sed 's/[^ ]* //' | xargs -
# We don't want to depend on any third-party CMake files.
# To enforce this, find and delete any such files.
grep -o -P '"contrib/[^"]+"' .gitmodules |
grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion|aws-crt-cpp)' |
grep -v -P 'contrib/(llvm-project|google-protobuf|grpc|abseil-cpp|corrosion|aws-crt-cpp|google-cloud-cpp)' |
xargs -I@ find @ \
-'(' -name 'CMakeLists.txt' -or -name '*.cmake' -')' -and -not -name '*.h.cmake' \
-delete

View File

@ -1,7 +1,7 @@
# The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script
# If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically,
# so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes
FROM ubuntu:20.04 AS glibc-donor
FROM ubuntu:22.04 AS glibc-donor
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu \
&& case $arch in \
amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \
arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \
esac
FROM alpine
@ -20,21 +24,21 @@ ENV LANG=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY entrypoint.sh /entrypoint.sh
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.35.so /lib64/ld-linux-x86-64.so.2 ;; \
arm64) ln -sf /lib/ld-2.35.so /lib/ld-linux-aarch64.so.1 ;; \
esac
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
@ -82,7 +86,8 @@ RUN arch=${TARGETARCH:-amd64} \
ARG DEFAULT_CONFIG_DIR="/etc/clickhouse-keeper"
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse-keeper"
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-keeper"
RUN mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
RUN clickhouse-keeper --version \
&& mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
&& chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
&& chown root:clickhouse "${DEFAULT_LOG_DIR}" \
&& chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}"

View File

@ -1,21 +1,31 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to choose the user the process runs as
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
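# Illustrative invocations only (image name and the elided arguments are placeholders):
#   docker run --user "$(id -u):$(id -g)" ... clickhouse/clickhouse-keeper
#   docker run -e CLICKHOUSE_RUN_AS_ROOT=1 ... clickhouse/clickhouse-keeper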
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image

View File

@ -3,10 +3,10 @@ compilers and build settings. Correctly configured Docker daemon is the single dependency.
Usage:
Build deb package with `clang-18` in `debug` mode:
Build deb package with `clang-19` in `debug` mode:
```
$ mkdir deb/test_output
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-18 --debug-build
$ ./packager --output-dir deb/test_output/ --package-type deb --compiler=clang-19 --debug-build
$ ls -l deb/test_output
-rw-r--r-- 1 root root 3730 clickhouse-client_22.2.2+debug_all.deb
-rw-r--r-- 1 root root 84221888 clickhouse-common-static_22.2.2+debug_amd64.deb
@ -17,11 +17,11 @@ $ ls -l deb/test_output
```
Build ClickHouse binary with `clang-18` and `address` sanitizer in `relwithdebuginfo`
Build ClickHouse binary with `clang-19` and `address` sanitizer in `relwithdebuginfo`
mode:
```
$ mkdir $HOME/some_clickhouse
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-18 --sanitizer=address
$ ./packager --output-dir=$HOME/some_clickhouse --package-type binary --compiler=clang-19 --sanitizer=address
$ ls -l $HOME/some_clickhouse
-rwxr-xr-x 1 root root 787061952 clickhouse
lrwxrwxrwx 1 root root 10 clickhouse-benchmark -> clickhouse

View File

@ -407,20 +407,20 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--compiler",
choices=(
"clang-18",
"clang-18-darwin",
"clang-18-darwin-aarch64",
"clang-18-aarch64",
"clang-18-aarch64-v80compat",
"clang-18-ppc64le",
"clang-18-riscv64",
"clang-18-s390x",
"clang-18-loongarch64",
"clang-18-amd64-compat",
"clang-18-amd64-musl",
"clang-18-freebsd",
"clang-19",
"clang-19-darwin",
"clang-19-darwin-aarch64",
"clang-19-aarch64",
"clang-19-aarch64-v80compat",
"clang-19-ppc64le",
"clang-19-riscv64",
"clang-19-s390x",
"clang-19-loongarch64",
"clang-19-amd64-compat",
"clang-19-amd64-musl",
"clang-19-freebsd",
),
default="clang-18",
default="clang-19",
help="a compiler to use",
)
parser.add_argument(

View File

@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -1,4 +1,4 @@
FROM ubuntu:20.04
FROM ubuntu:22.04
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
# It can be removed after we move to version 23.04+
@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.3.21"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off
@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq libksba8 \
&& apt-get autoremove -yq \
; fi
# This step runs only if the clickhouse binary does not already exist
RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
; apt-get update \
&& apt-get install --yes --no-install-recommends \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq dirmngr gnupg2 \
&& chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
# The last chmod is here to make the next one a no-op in the docker official library Dockerfile
# post install
# we need to allow "others" access to clickhouse folder, because docker container
@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d
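# A usage sketch (not part of the build; paths are placeholders): scripts mounted into
# this directory are executed by the entrypoint on the first start of the container, e.g.
#   docker run -d -v "$PWD/init:/docker-entrypoint-initdb.d" clickhouse/clickhouse-server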

View File

@ -1,3 +1,11 @@
<!---
The README.md is generated by README.sh from the following sources:
- README.src/content.md
- README.src/license.md
If you want to change it, edit these files
-->
# ClickHouse Server Docker Image
## What is ClickHouse?
@ -12,14 +20,19 @@ For more information and documentation see https://clickhouse.com/.
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- Full version tags like `22.2.3` and `22.2.3.5` point to the corresponding release.
<!-- docker-official-library:off -->
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
- The tag `head` is built from the latest commit to the default branch.
- Each tag has an optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
<!-- docker-official-library:on -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). It is supported in Graviton >=2 and in Azure and GCP instances. Examples of unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image, which requires Docker version >= `20.10.10` containing [this patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround you can run with `docker run --security-opt seccomp=unconfined`, although that has security implications.
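On such older Docker versions, the workaround mentioned above looks like this (a sketch only; `seccomp=unconfined` disables syscall filtering, so weigh the security implications first):

```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 \
    --security-opt seccomp=unconfined clickhouse/clickhouse-server
```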
## How to use this image
@ -29,7 +42,7 @@ For more information and documentation see https://clickhouse.com/.
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user without a password.
@ -46,7 +59,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
@ -69,7 +82,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
@ -87,8 +100,8 @@ Typically you may want to mount the following folders inside your container to a
```bash
docker run -d \
-v $(realpath ./ch_data):/var/lib/clickhouse/ \
-v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
@ -110,6 +123,8 @@ docker run -d \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
Read more in the [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
@ -125,8 +140,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
### Start server as custom user
```bash
# $(pwd)/data/clickhouse should exist and be owned by current user
docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
@ -134,7 +149,7 @@ When you use the image with local directories mounted, you probably want to spec
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
### How to create a default database and user on startup

docker/server/README.sh Executable file
View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail
# A script to generate README.md, close to how it is done in https://github.com/docker-library/docs
WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"
R=README.md
cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE
If you want to change it, edit these files
-->
EOD
cat "$CONTENT" >> "$R"
cat >> "$R" <<EOD
## License
$(cat $LICENSE)
EOD
# Remove the %%LOGO%% line and the one below it from the file
sed -i '/^%%LOGO%%/,+1d' "$R"
# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' $R
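# Usage sketch: run this script to regenerate docker/server/README.md after editing
# the sources listed above, e.g.:
#   ./docker/server/README.sh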

View File

@ -0,0 +1 @@
ClickHouse is the fastest and most resource-efficient OSS database for real-time apps and analytics.

Some files were not shown because too many files have changed in this diff.