Revert "improve CI with digest for docker, build and test jobs (#56317)"

This reverts commit 7844fcc196.
Max K, 2023-12-15 15:48:01 +01:00 (committed by GitHub)
parent 1780671443
commit 8c7add0334
73 changed files with 2719 additions and 3555 deletions


@@ -18,6 +18,9 @@ runs:
echo "Setup the common ENV variables"
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/${{inputs.job_type}}
REPO_COPY=${{runner.temp}}/${{inputs.job_type}}/git-repo-copy
IMAGES_PATH=${{runner.temp}}/images_path
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
if [ -z "${{env.GITHUB_JOB_OVERRIDDEN}}" ] && [ "true" == "${{inputs.nested_job}}" ]; then
echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs"
@@ -27,4 +30,6 @@ runs:
shell: bash
run: |
# to remove every leftovers
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
sudo rm -fr "$TEMP_PATH"
mkdir -p "$REPO_COPY"
cp -a "$GITHUB_WORKSPACE"/. "$REPO_COPY"/
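With this revert, the common setup once again mirrors the checked-out workspace into a per-job REPO_COPY; the run_command blocks later in this diff then execute the CI scripts from that copy. A typical invocation (taken from the workflow hunks below; $CHECK_NAME is set by the reusable test workflow) looks like:

cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"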


@@ -10,19 +10,27 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
RunConfig:
CheckLabels:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
# Run the first check always, even if the CI is cancelled
if: ${{ always() }}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
PythonUnitTests:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Python unit tests
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
@@ -32,235 +40,273 @@ jobs:
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
needs: CheckLabels
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
CompatibilityCheckX86:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check (amd64)
test_name: Compatibility check X86
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check (aarch64)
test_name: Compatibility check X86
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
BuilderBinDarwinAarch64:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebDebug
- BuilderDebRelease
- BuilderDebTsan
- BuilderDebDebug
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ !failure() && !cancelled() }}
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestDebug:
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
- FunctionalStatelessTestAsan
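For reference, the removed PrepareRunConfig step hands the generated JSON to dependent jobs through GitHub's multi-line step-output syntax. A standalone sketch of that pattern ($RUNNER_TEMP stands in for the ${{ runner.temp }} expression used above):

# expose the ci configuration JSON as the CI_DATA step output
{
  echo 'CI_DATA<<EOF'
  cat "$RUNNER_TEMP/ci_run_data.json"
  echo 'EOF'
} >> "$GITHUB_OUTPUT"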

.github/workflows/docs_check.yml (new file, 138 lines)

@@ -0,0 +1,138 @@
name: DocsCheck
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
on: # yamllint disable-line rule:truthy
pull_request:
types:
- synchronize
- reopened
- opened
branches:
- master
paths:
- '**.md'
- 'docker/docs/**'
- 'docs/**'
- 'utils/check-style/aspell-ignore/**'
- 'tests/ci/docs_check.py'
- '.github/workflows/docs_check.yml'
jobs:
CheckLabels:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Labels check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
DockerHubPushAarch64:
needs: CheckLabels
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
needs: CheckLabels
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
StyleCheck:
needs: DockerHubPush
# We need additional `&& ! cancelled()` to have the job being able to cancel
if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Style check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 style_check.py
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
DocsCheck:
needs: DockerHubPush
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docs check
runner_type: func-tester-aarch64
additional_envs: |
run_command: |
cd "$REPO_COPY/tests/ci"
python3 docs_check.py
FinishCheck:
needs:
- StyleCheck
- DockerHubPush
- DocsCheck
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 finish_check.py
python3 merge_pr.py --check-approved
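The restored image pipeline in this file builds per-architecture images and then merges them into multi-arch manifests. Condensed into one shell sketch (in the workflow each step runs in its own job on a different runner, with the changed-image lists passed between them as artifacts):

cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64                    # aarch64 runner
python3 docker_images_check.py --suffix amd64                      # amd64 runner
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64  # merge into multi-arch manifests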


@@ -11,14 +11,16 @@ on: # yamllint disable-line rule:truthy
workflow_call:
jobs:
KeeperJepsenRelease:
uses: ./.github/workflows/reusable_simple_job.yml
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Jepsen keeper check
runner_type: style-checker
run_command: |
cd "$REPO_COPY/tests/ci"
python3 jepsen_check.py keeper
# ServerJepsenRelease:
# uses: ./.github/workflows/reusable_simple_job.yml
# runs-on: [self-hosted, style-checker]
# uses: ./.github/workflows/reusable_test.yml
# with:
# test_name: Jepsen server check
# runner_type: style-checker


@@ -8,26 +8,19 @@ on: # yamllint disable-line rule:truthy
# schedule:
# - cron: '0 0 2 31 1' # never for now
workflow_call:
inputs:
data:
description: json ci data
type: string
required: true
jobs:
BuilderFuzzers:
uses: ./.github/workflows/reusable_build.yml
with:
build_name: fuzzers
data: ${{ inputs.data }}
libFuzzerTest:
needs: [BuilderFuzzers]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: libFuzzer tests
runner_type: func-tester
data: ${{ inputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
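The test_name and additional_envs inputs reach the run_command as plain environment variables: the reusable workflow's "Set build envs" step (see reusable_test.yml later in this diff) appends them to $GITHUB_ENV, so the sequence is roughly:

cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=libFuzzer tests
KILL_TIMEOUT=10800
EOF
# a later step then runs the run_command above:
cd "$REPO_COPY/tests/ci"
python3 libfuzzer_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"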

File diff suppressed because it is too large.


@@ -13,36 +13,67 @@ jobs:
Debug:
# The task for having a preserved ENV and event.json for later investigation
uses: ./.github/workflows/debug.yml
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
- name: PrepareRunConfig
id: runconfig
clear-repository: true
- name: Images check
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --rebuild-all-docker --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix aarch64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64 --all
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
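Unlike the pull-request workflows earlier in this diff, the image jobs in this workflow pass --all to docker_images_check.py; the flag's exact behaviour is not spelled out here, but it plausibly rebuilds every image instead of only the changed ones:

cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_images_check.py --suffix amd64         # pull-request runs: changed images only
python3 docker_images_check.py --suffix amd64 --all   # this workflow: all images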
SonarCloud:
runs-on: [self-hosted, builder]
env:

File diff suppressed because it is too large.


@@ -13,165 +13,171 @@ on: # yamllint disable-line rule:truthy
- '2[1-9].[1-9]'
jobs:
RunConfig:
runs-on: [self-hosted, style-checker]
outputs:
data: ${{ steps.runconfig.outputs.CI_DATA }}
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true # to ensure correct digests
- name: Labels check
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 run_check.py
- name: Python unit tests
python3 docker_images_check.py --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json
DockerHubPushAmd64:
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Images check
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
echo "Testing the main ci directory"
python3 -m unittest discover -s . -p 'test_*.py'
for dir in *_lambda/; do
echo "Testing $dir"
python3 -m unittest discover -s "$dir" -p 'test_*.py'
done
- name: PrepareRunConfig
id: runconfig
python3 docker_images_check.py --suffix amd64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json
DockerHubPush:
needs: [DockerHubPushAmd64, DockerHubPushAarch64]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags
filter: tree:0
- name: Download changed aarch64 images
uses: actions/download-artifact@v3
with:
name: changed_images_aarch64
path: ${{ runner.temp }}
- name: Download changed amd64 images
uses: actions/download-artifact@v3
with:
name: changed_images_amd64
path: ${{ runner.temp }}
- name: Images check
run: |
echo "::group::configure CI run"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --rebuild-all-binaries --outfile ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
echo "::group::CI run configure results"
python3 -m json.tool ${{ runner.temp }}/ci_run_data.json
echo "::endgroup::"
{
echo 'CI_DATA<<EOF'
cat ${{ runner.temp }}/ci_run_data.json
echo 'EOF'
} >> "$GITHUB_OUTPUT"
- name: Re-create GH statuses for skipped jobs if any
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64
- name: Upload images files to artifacts
uses: actions/upload-artifact@v3
with:
name: changed_images
path: ${{ runner.temp }}/changed_images.json
CompatibilityCheckX86:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check (amd64)
test_name: Compatibility check X86
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
CompatibilityCheckAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Compatibility check (aarch64)
test_name: Compatibility check X86
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
BuilderDebRelease:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_release
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAarch64:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebAsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_asan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebUBsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_ubsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebTsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_tsan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebMsan:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_msan
data: ${{ needs.RunConfig.outputs.data }}
BuilderDebDebug:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: package_debug
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwin:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
BuilderBinDarwinAarch64:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() }}
needs: [DockerHubPush]
uses: ./.github/workflows/reusable_build.yml
with:
build_name: binary_darwin_aarch64
checkout_depth: 0
data: ${{ needs.RunConfig.outputs.data }}
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Docker server and keeper images
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
checkout_depth: 0
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
filter: tree:0
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-server --image-path docker/server
python3 docker_server.py --release-type head --no-push \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
@@ -179,39 +185,32 @@ jobs:
- BuilderDebUBsan
- BuilderDebMsan
- BuilderDebDebug
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
BuilderSpecialReport:
if: ${{ !failure() && !cancelled() }}
if: ${{ success() || failure() }}
needs:
- RunConfig
- BuilderDebRelease
- BuilderDebAarch64
- BuilderDebAsan
- BuilderDebTsan
- BuilderDebUBsan
- BuilderDebMsan
- BuilderDebDebug
- BuilderBinDarwin
- BuilderBinDarwinAarch64
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
NEEDS_DATA<<NDENV
${{ toJSON(needs) }}
NDENV
run_command: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 build_report_check.py "$CHECK_NAME"
MarkReleaseReady:
needs:
@@ -233,224 +232,282 @@ jobs:
#################################### INSTALL PACKAGES ######################################
############################################################################################
InstallPackagesTestRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (amd64)
runner_type: style-checker
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
InstallPackagesTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Install packages (arm64)
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 install_check.py "$CHECK_NAME"
##############################################################################################
########################### FUNCTIONAl STATELESS TESTS #######################################
##############################################################################################
FunctionalStatelessTestRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (aarch64)
runner_type: func-tester-aarch64
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (asan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestMsan:
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (ubsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
batches: 2
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestMsan:
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (msan)
runner_type: func-tester
additional_envs: |
KILL_TIMEOUT=10800
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatelessTestDebug:
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (debug)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=10800
batches: 5
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
############################ FUNCTIONAl STATEFUL TESTS #######################################
##############################################################################################
FunctionalStatefulTestRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAarch64]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (aarch64)
runner_type: func-tester-aarch64
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (asan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestMsan:
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (msan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (ubsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
FunctionalStatefulTestDebug:
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateful tests (debug)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
additional_envs: |
KILL_TIMEOUT=3600
run_command: |
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################
StressTestAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestMsan:
needs: [RunConfig, BuilderDebMsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebMsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebUBsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
StressTestDebug:
needs: [RunConfig, BuilderDebDebug]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebDebug]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
run_command: |
cd "$REPO_COPY/tests/ci"
python3 stress_check.py "$CHECK_NAME"
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsAnalyzerAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebAsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, analyzer)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebTsan]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
batches: 6
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
needs: [BuilderDebRelease]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
batches: 4
run_command: |
cd "$REPO_COPY/tests/ci"
python3 integration_test_check.py "$CHECK_NAME"
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- BuilderSpecialReport
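The NEEDS_DATA block used by the report jobs above serializes the `needs` context into the job environment as multi-line JSON. Sketched below with an illustrative payload (the real value contains the result and outputs of every job listed under needs):

cat >> "$GITHUB_ENV" << 'EOF'
NEEDS_DATA<<NDENV
{"BuilderDebRelease": {"result": "success", "outputs": {}}}
NDENV
EOF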


@@ -22,10 +22,6 @@ name: Build ClickHouse
description: the label of runner to use
default: builder
type: string
data:
description: json ci data
type: string
required: true
additional_envs:
description: additional ENV variables to setup the job
type: string
@@ -33,7 +29,6 @@ name: Build ClickHouse
jobs:
Build:
name: Build-${{inputs.build_name}}
if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
env:
GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -42,7 +37,6 @@ jobs:
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: true
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
@@ -50,9 +44,6 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
@@ -69,18 +60,20 @@ jobs:
uses: ./.github/actions/common_setup
with:
job_type: build_check
- name: Pre
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.build_name}}'
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Build
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
- name: Post
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
- name: Mark as done
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.build_name}}'
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Clean
if: always()
uses: ./.github/actions/clean
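The removed DOCKER_TAG<<DOCKER_JSON lines exported the image-to-tag map from the ci data as a multi-line environment variable. A minimal sketch of reading it back in a later step (the variable name comes from the hunk above; how build_check.py consumed it is not shown in this diff, so the consumer is an assumption):

python3 -c 'import json, os; print(json.loads(os.environ["DOCKER_TAG"]))'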


@@ -1,68 +0,0 @@
name: Build docker images
'on':
workflow_call:
inputs:
data:
description: json with ci data from todo job
required: true
type: string
set_latest:
description: set latest tag for resulting multiarch manifest
required: false
type: boolean
default: false
jobs:
DockerBuildAarch64:
runs-on: [self-hosted, style-checker-aarch64]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_aarch64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_aarch64) }}'
DockerBuildAmd64:
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_amd64) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
python3 "${GITHUB_WORKSPACE}/tests/ci/docker_images_check.py" \
--suffix amd64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_amd64) }}'
DockerMultiArchManifest:
needs: [DockerBuildAmd64, DockerBuildAarch64]
runs-on: [self-hosted, style-checker]
if: |
!failure() && !cancelled() && toJson(fromJson(inputs.data).docker_data.missing_multi) != '[]'
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
ref: ${{ fromJson(inputs.data).git_ref }}
- name: Build images
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
if [ "${{ inputs.set_latest }}" == "true" ]; then
echo "latest tag will be set for resulting manifests"
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}' \
--set-latest
else
python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 \
--image-tags '${{ toJson(fromJson(inputs.data).docker_data.images) }}' \
--missing-images '${{ toJson(fromJson(inputs.data).docker_data.missing_multi) }}'
fi
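Each job in the deleted workflow above is skipped when the corresponding missing-images list in the ci data is empty (the != '[]' guards). A rough shell equivalent of that guard, assuming the JSON layout implied by the expressions and the ci_run_data.json file produced earlier in this diff:

missing=$(python3 -c 'import json, sys; print(json.dumps(json.load(open(sys.argv[1]))["docker_data"]["missing_amd64"]))' "$RUNNER_TEMP/ci_run_data.json")
if [ "$missing" != "[]" ]; then
  echo "amd64 images to rebuild: $missing"
fi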


@@ -1,90 +0,0 @@
### For the pure soul wishes to move it to another place
# https://github.com/orgs/community/discussions/9050
name: Simple job
'on':
workflow_call:
inputs:
test_name:
description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
required: true
type: string
runner_type:
description: the label of runner to use
required: true
type: string
run_command:
description: the command to launch the check
default: ""
required: false
type: string
checkout_depth:
description: the value of the git shallow checkout
required: false
type: number
default: 1
submodules:
description: if the submodules should be checked out
required: false
type: boolean
default: false
additional_envs:
description: additional ENV variables to setup the job
type: string
working-directory:
description: sets custom working directory
type: string
default: ""
git_ref:
description: commit to use, merge commit for pr or head
required: false
type: string
default: ${{ github.event.after }} # no merge commit
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
Test:
runs-on: [self-hosted, '${{inputs.runner_type}}']
name: ${{inputs.test_name}}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Run
run: |
if [ -n '${{ inputs.working-directory }}' ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
${{ inputs.run_command }}
- name: Clean
if: always()
uses: ./.github/actions/clean


@@ -14,10 +14,13 @@ name: Testing workflow
required: true
type: string
run_command:
description: the command to launch the check
default: ""
required: false
description: the command to launch the check. Usually starts with `cd '$REPO_COPY/tests/ci'`
required: true
type: string
batches:
description: how many batches for the test will be launched
default: 1
type: number
checkout_depth:
description: the value of the git shallow checkout
required: false
@@ -31,89 +34,80 @@ name: Testing workflow
additional_envs:
description: additional ENV variables to setup the job
type: string
data:
description: ci data
type: string
required: true
working-directory:
description: sets custom working directory
type: string
default: ""
secrets:
secret_envs:
description: if given, it's passed to the environments
required: false
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECK_NAME: ${{inputs.test_name}}
jobs:
PrepareStrategy:
# batches < 1 is misconfiguration,
# and we need this step only for batches > 1
if: ${{ inputs.batches > 1 }}
runs-on: [self-hosted, style-checker-aarch64]
outputs:
batches: ${{steps.batches.outputs.batches}}
steps:
- name: Calculate batches
id: batches
run: |
batches_output=$(python3 -c 'import json; print(json.dumps(list(range(${{inputs.batches}}))))')
echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"
Test:
runs-on: [self-hosted, '${{inputs.runner_type}}']
if: ${{ !failure() && !cancelled() && contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.test_name) }}
name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
# If PrepareStrategy is skipped for batches == 1,
# we still need to launch the test.
# `! failure()` is mandatory here to launch on skipped Job
# `&& !cancelled()` to allow the job to be cancelable
if: ${{ ( !failure() && !cancelled() ) && inputs.batches > 0 }}
# Do not add `-0` to the end, if there's only one batch
name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
env:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }}
runs-on: [self-hosted, '${{inputs.runner_type}}']
needs: [PrepareStrategy]
strategy:
fail-fast: false # we always wait for entire matrix
matrix:
batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
# if PrepareStrategy does not have batches, we use 0
batch: ${{ needs.PrepareStrategy.outputs.batches
&& fromJson(needs.PrepareStrategy.outputs.batches)
|| fromJson('[0]')}}
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
ref: ${{ fromJson(inputs.data).git_ref }}
submodules: ${{inputs.submodules}}
fetch-depth: ${{inputs.checkout_depth}}
filter: tree:0
- name: Set build envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
EOF
- name: Common setup
uses: ./.github/actions/common_setup
with:
job_type: test
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Setup batch
if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
if: ${{ inputs.batches > 1}}
run: |
cat >> "$GITHUB_ENV" << 'EOF'
RUN_BY_HASH_NUM=${{matrix.batch}}
RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
RUN_BY_HASH_TOTAL=${{inputs.batches}}
EOF
- name: Pre run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
- name: Run
run: |
if [ -n "${{ inputs.working-directory }}" ]; then
cd "${{ inputs.working-directory }}"
else
cd "$GITHUB_WORKSPACE/tests/ci"
fi
if [ -n "$(echo '${{ inputs.run_command }}' | tr -d '\n')" ]; then
echo "Running command from workflow input"
${{ inputs.run_command }}
else
echo "Running command from job config"
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --run --job-name '${{inputs.test_name}}'
fi
- name: Post run
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
- name: Mark as done
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
- name: Run test
run: ${{inputs.run_command}}
- name: Clean
if: always()
uses: ./.github/actions/clean
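Worked example of the restored PrepareStrategy step: for batches: 3 the one-liner emits the list the Test job's matrix fans out over.

batches_output=$(python3 -c 'import json; print(json.dumps(list(range(3))))')
echo "batches=${batches_output}" >> "$GITHUB_OUTPUT"   # writes batches=[0, 1, 2]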


@@ -36,7 +36,6 @@ ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@@ -48,27 +47,15 @@ ARG DIRECT_DOWNLOAD_URLS=""
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
&& cd /tmp && rm -f /tmp/*tgz && rm -f /tmp/*tgz.sha512 |: \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
; done \
fi \
&& cat *.tgz.sha512 | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \
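The restored RUN block boils down to this per-package sequence, shown unrolled for a single package (values for ${REPOSITORY}, ${package}, ${VERSION} and ${arch} come from the ARGs and the loop above; the sed rewrites the /output/ prefix recorded in the checksum file to the /tmp download location):

cd /tmp
wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz"
wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512"
sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c
tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C /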


@@ -149,7 +149,7 @@ then
mkdir -p "$PERF_OUTPUT"
cp -r ../tests/performance "$PERF_OUTPUT"
cp -r ../tests/config/top_level_domains "$PERF_OUTPUT"
cp -r ../tests/performance/scripts/config "$PERF_OUTPUT" ||:
cp -r ../docker/test/performance-comparison/config "$PERF_OUTPUT" ||:
for SRC in /output/clickhouse*; do
# Copy all clickhouse* files except packages and bridges
[[ "$SRC" != *.* ]] && [[ "$SRC" != *-bridge ]] && \
@@ -160,7 +160,7 @@ then
ln -sf clickhouse "$PERF_OUTPUT"/clickhouse-keeper
fi
cp -r ../tests/performance/scripts "$PERF_OUTPUT"/scripts ||:
cp -r ../docker/test/performance-comparison "$PERF_OUTPUT"/scripts ||:
prepare_combined_output "$PERF_OUTPUT"
# We have to know the revision that corresponds to this binary build.


@@ -34,7 +34,6 @@ ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.11.2.11"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""
# user/group precreated explicitly with fixed uid/gid on purpose.
# It is especially important for rootless containers: in that case entrypoint
@@ -44,26 +43,15 @@ ARG DIRECT_DOWNLOAD_URLS=""
# The same uid / gid (101) is used both for alpine and ubuntu.
RUN arch=${TARGETARCH:-amd64} \
&& cd /tmp \
&& if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from provided urls with tgz packages: ${DIRECT_DOWNLOAD_URLS}" \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
echo "Get ${url}" \
&& wget -c -q "$url" \
; done \
else \
for package in ${PACKAGES}; do \
echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& for package in ${PACKAGES}; do \
( \
cd /tmp \
&& echo "Get ${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz" \
&& wget -c -q "${REPOSITORY}/${package}-${VERSION}-${arch}.tgz.sha512" \
; done \
fi \
&& cat *.tgz.sha512 | sed 's:/output/:/tmp/:' | sha512sum -c \
&& for file in *.tgz; do \
if [ -f "$file" ]; then \
echo "Unpacking $file"; \
tar xvzf "$file" --strip-components=1 -C /; \
fi \
&& sed 's:/output/:/tmp/:' < "${package}-${VERSION}-${arch}.tgz.sha512" | sha512sum -c \
&& tar xvzf "${package}-${VERSION}-${arch}.tgz" --strip-components=1 -C / \
) \
; done \
&& rm /tmp/*.tgz /install -r \
&& addgroup -S -g 101 clickhouse \

View File

@ -37,7 +37,6 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# from debs created by CI build, for example:
# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
ARG deb_location_url=""
ARG DIRECT_DOWNLOAD_URLS=""
# set non-empty single_binary_location_url to create docker image
# from a single binary url (useful for non-standard builds - with sanitizers, for arm64).
@ -45,18 +44,6 @@ ARG single_binary_location_url=""
ARG TARGETARCH
# install from direct URL
RUN if [ -n "${DIRECT_DOWNLOAD_URLS}" ]; then \
echo "installing from custom predefined urls with deb packages: ${DIRECT_DOWNLOAD_URLS}" \
&& rm -rf /tmp/clickhouse_debs \
&& mkdir -p /tmp/clickhouse_debs \
&& for url in $DIRECT_DOWNLOAD_URLS; do \
wget --progress=bar:force:noscroll "$url" -P /tmp/clickhouse_debs || exit 1 \
; done \
&& dpkg -i /tmp/clickhouse_debs/*.deb \
&& rm -rf /tmp/* ; \
fi
# install from a web location with deb packages
RUN arch="${TARGETARCH:-amd64}" \
&& if [ -n "${deb_location_url}" ]; then \

View File

@ -39,8 +39,18 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY run.sh /
COPY * /
CMD ["bash", "/run.sh"]
# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
CMD ["bash", "-c", "node=$((RANDOM % $(numactl --hardware | sed -n 's/^.*available:\\(.*\\)nodes.*$/\\1/p'))); echo Will bind to NUMA node $node; numactl --cpunodebind=$node --membind=$node /entrypoint.sh"]
# docker run --network=host --volume <workspace>:/workspace --volume=<output>:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison

View File

@ -25,7 +25,7 @@ The check status summarizes the report in a short text message like `1 faster, 1
* `1 unstable` -- how many queries have unstable results,
* `1 errors` -- how many errors there are in total. Action is required for every error, this number must be zero. The number of errors includes slower tests, tests that are too long, errors while running the tests and building reports, etc. Please look at the main report page to investigate these errors.
The report page itself consists of several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such a table is marked in red, this also means an error, e.g., a test is taking too long to run.
The report page itself consists of several tables. Some of them always signify errors, e.g. "Run errors" -- the very presence of this table indicates that there were errors during the test that are not normal and must be fixed. Some tables are mostly informational, e.g. "Test times" -- they reflect normal test results. But if a cell in such a table is marked in red, this also means an error, e.g., a test is taking too long to run.
#### Tested Commits
Informational, no action required. Log messages for the commits that are tested. Note that for the right commit, we show nominal tested commit `pull/*/head` and real tested commit `pull/*/merge`, which is generated by GitHub by merging latest master to the `pull/*/head` and which we actually build and test in CI.
@ -33,12 +33,12 @@ Informational, no action required. Log messages for the commits that are tested.
#### Error Summary
Action required for every item.
This table summarizes all errors that occurred during the test. Click the links to go to the description of a particular error.
This table summarizes all errors that occurred during the test. Click the links to go to the description of a particular error.
#### Run Errors
Action required for every item -- these are errors that must be fixed.
The errors that occurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below.
The errors that occurred when running some test queries. For more information about the error, download test output archive and see `test-name-err.log`. To reproduce, see 'How to run' below.
#### Slow on Client
Action required for every item -- these are errors that must be fixed.
@ -65,7 +65,7 @@ You can find flame graphs for queries with performance changes in the test outpu
#### Unstable Queries
Action required for the cells marked in red.
These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%.
These are the queries for which we did not observe a statistically significant change in performance, but for which the variance in query performance is very high. This means that we are likely to observe big changes in performance even in the absence of real changes, e.g. when comparing the server to itself. Such queries are going to have bad sensitivity as performance tests -- if a query has, say, 50% expected variability, this means we are going to see changes in performance up to 50%, even when there were no real changes in the code. And because of this, we won't be able to detect changes less than 50% with such a query, which is pretty bad. The reasons for the high variability must be investigated and fixed; ideally, the variability should be brought under 5-10%.
The most frequent reason for instability is that the query is just too short -- e.g. below 0.1 seconds. Bringing query time to 0.2 seconds or above usually helps.
Other reasons may include:
@ -88,7 +88,7 @@ This table summarizes the changes in performance of queries in each test -- how
Action required for the cells marked in red.
This table shows the run times for all the tests. You may have to fix two kinds of errors in this table:
1) Average query run time is too long -- probably means that the preparatory steps such as creating the table and filling them with data are taking too long. Try to make them faster.
1) Average query run time is too long -- probably means that the preparatory steps such as creating the table and filling them with data are taking too long. Try to make them faster.
2) Longest query run time is too long -- some particular queries are taking too long, try to make them faster. The ideal query run time is between 0.1 and 1 s.
#### Metric Changes
@ -186,4 +186,4 @@ analytically, but I don't know enough math to do it. It would be something
close to Wilcoxon test distribution.
### References
1\. Box, Hunter, Hunter "Statistics for exprerimenters", p. 78: "A Randomized Design Used in the Comparison of Standard and Modified Fertilizer Mixtures for Tomato Plants."
1\. Box, Hunter, Hunter "Statictics for exprerimenters", p. 78: "A Randomized Design Used in the Comparison of Standard and Modified Fertilizer Mixtures for Tomato Plants."

View File

@ -79,3 +79,4 @@ run
rm output.7z
7z a output.7z ./*.{log,tsv,html,txt,rep,svg} {right,left}/{performance,db/preprocessed_configs}

View File

@ -236,7 +236,7 @@ function run_tests
fi
fi
# For PRs w/o changes in test definitions, test only a subset of queries,
# For PRs w/o changes in test definitions, test only a subset of queries,
# and run them less times. If the corresponding environment variables are
# already set, keep those values.
#

View File

@ -7,9 +7,8 @@ export CHPC_CHECK_START_TIMESTAMP
S3_URL=${S3_URL:="https://clickhouse-builds.s3.amazonaws.com"}
BUILD_NAME=${BUILD_NAME:-package_release}
export S3_URL BUILD_NAME
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
# Sometimes AWS responds with DNS error and it's impossible to retry it with
# Sometimes AWS responds with DNS error and it's impossible to retry it with
# current curl version options.
function curl_with_retry
{
@ -89,9 +88,19 @@ chmod 777 workspace output
cd workspace
[ ! -e "/artifacts/performance.tar.zst" ] && echo "ERROR: performance.tar.zst not found" && exit 1
mkdir -p right
tar -xf "/artifacts/performance.tar.zst" -C right --no-same-owner --strip-components=1 --zstd --extract --verbose
# Download the package for the version we are going to test.
# A temporary solution for migrating into PRs directory
for prefix in "$S3_URL/PRs" "$S3_URL";
do
if curl_with_retry "$prefix/$PR_TO_TEST/$SHA_TO_TEST/$BUILD_NAME/performance.tar.zst"
then
right_path="$prefix/$PR_TO_TEST/$SHA_TO_TEST/$BUILD_NAME/performance.tar.zst"
break
fi
done
mkdir right
wget -nv -nd -c "$right_path" -O- | tar -C right --no-same-owner --strip-components=1 --zstd --extract --verbose
# Find reference revision if not specified explicitly
if [ "$REF_SHA" == "" ]; then find_reference_sha; fi
@ -149,7 +158,7 @@ cat /proc/sys/kernel/core_pattern
# Start the main comparison script.
{
time $SCRIPT_DIR/download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \
time ../download.sh "$REF_PR" "$REF_SHA" "$PR_TO_TEST" "$SHA_TO_TEST" && \
time stage=configure "$script_path"/compare.sh ; \
} 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee compare.log

View File

@ -12,7 +12,7 @@ from
-- quantiles of randomization distributions
-- note that for small number of runs, the exact quantile might not make
-- sense, because the last possible value of randomization distribution
-- might take a larger percentage of distribution (i.e. the distribution
-- might take a larger percentage of distribution (i.e. the distribution
-- actually has discrete values, and the last step can be large).
select quantileExactForEach(0.99)(
arrayMap(x, y -> abs(x - y), metrics_by_label[1], metrics_by_label[2]) as d
@ -44,7 +44,7 @@ from
-- for each virtual run, randomly reorder measurements
order by virtual_run, rand()
) virtual_runs
) relabeled
) relabeled
group by virtual_run, random_label
) virtual_medians
group by virtual_run -- aggregate by random_label
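A minimal Python analogue of what this query computes, with made-up single-metric measurements (the real query does the same per metric with quantileExactForEach over many virtual runs):
# Randomization-distribution sketch: shuffle labels, recompute the difference of
# per-label medians, and use a high quantile of those differences as a threshold.
import random
import statistics

left = [0.101, 0.098, 0.105, 0.097, 0.103]    # made-up times from server 1
right = [0.121, 0.118, 0.125, 0.119, 0.122]   # made-up times from server 2
observed = abs(statistics.median(left) - statistics.median(right))

pooled = left + right
diffs = []
for _ in range(1000):  # "virtual runs"
    random.shuffle(pooled)
    a, b = pooled[:len(left)], pooled[len(left):]
    diffs.append(abs(statistics.median(a) - statistics.median(b)))

threshold = sorted(diffs)[int(0.99 * len(diffs))]  # roughly quantileExact(0.99)
print(observed, threshold, observed > threshold)   # True means a significant change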

View File

@ -51,3 +51,4 @@ run
rm output.7z
7z a output.7z ./*.{log,tsv,html,txt,rep,svg} {right,left}/{performance,db/preprocessed_configs}

View File

@ -357,7 +357,7 @@ for query_index in queries_to_run:
prewarm_id = f"{query_prefix}.prewarm0"
try:
# During the warm-up runs, we will also:
# During the warm-up runs, we will also:
# * detect queries that are exceedingly long, to fail fast,
# * collect profiler traces, which might be helpful for analyzing
# test coverage. We disable profiler for normal runs because
@ -390,7 +390,7 @@ for query_index in queries_to_run:
query_error_on_connection[conn_index] = traceback.format_exc()
continue
# Report all errors that occurred during prewarm and decide what to do next.
# Report all errors that occurred during prewarm and decide what to do next.
# If prewarm fails for the query on all servers -- skip the query and
# continue testing the next query.
# If prewarm fails on one of the servers, run the query on the rest of them.
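A small sketch of the decision described above, with a hypothetical per-connection error map from the warm-up runs (in the real script this is tracked by connection index):
# connections_to_test() keeps only the servers whose prewarm succeeded, and
# returns an empty list when prewarm failed everywhere (skip the query).
from typing import Dict, List, Optional

def connections_to_test(prewarm_errors: Dict[int, Optional[str]]) -> List[int]:
    failed = [i for i, err in prewarm_errors.items() if err]
    if len(failed) == len(prewarm_errors):
        return []  # prewarm failed on all servers -- skip this query
    return [i for i, err in prewarm_errors.items() if not err]

print(connections_to_test({0: "Timeout", 1: None}))       # -> [1]
print(connections_to_test({0: "Timeout", 1: "Timeout"}))  # -> []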

View File

@ -1,18 +0,0 @@
#!/bin/bash
entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
[ ! -e "$entry" ] && echo "ERROR: test scripts are not found" && exit 1
# Bind everything to one NUMA node, if there's more than one. Theoretically the
# node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it
# is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
numactl --cpunodebind=$node --membind=$node $entry

View File

@ -123,7 +123,9 @@ class ArtifactsHelper:
return fnmatch(key, glob)
return True
results = filter(ignore, self.s3_helper.list_prefix(self.s3_prefix))
results = filter(
ignore, self.s3_helper.list_prefix(self.s3_prefix, S3_BUILDS_BUCKET)
)
return list(results)
@staticmethod

View File

@ -1,7 +1,6 @@
#!/usr/bin/env python3
import logging
import os
import subprocess
import sys
from pathlib import Path
@ -20,8 +19,11 @@ from commit_status_helper import (
get_commit,
post_commit_status,
)
from docker_images_helper import DockerImage, get_docker_image, pull_image
from env_helper import REPORT_PATH, TEMP_PATH
from docker_pull_helper import DockerImage, get_image_with_version
from env_helper import (
REPORTS_PATH,
TEMP_PATH,
)
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResult
@ -67,13 +69,10 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
check_name = sys.argv[1]
pr_info = PRInfo()
@ -85,7 +84,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
build_name = get_build_name_for_check(check_name)
urls = read_build_urls(build_name, reports_path)
@ -209,9 +208,7 @@ def main():
logging.info("Result: '%s', '%s', '%s'", status, description, report_url)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, status, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, check_name, pr_info)
if __name__ == "__main__":

View File

@ -82,21 +82,19 @@ def main():
is_ok, test_results = process_all_results(status_files)
pr_info = PRInfo()
if not test_results:
description = "No results to upload"
report_url = ""
logging.info("No results to upload")
else:
description = "" if is_ok else "Changed tests don't reproduce the bug"
report_url = upload_results(
S3Helper(),
pr_info.number,
pr_info.sha,
test_results,
status_files,
check_name_with_group,
)
return
pr_info = PRInfo()
report_url = upload_results(
S3Helper(),
pr_info.number,
pr_info.sha,
test_results,
status_files,
check_name_with_group,
)
gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha)
@ -104,10 +102,9 @@ def main():
commit,
"success" if is_ok else "error",
report_url,
description,
"" if is_ok else "Changed tests don't reproduce the bug",
check_name_with_group,
pr_info,
dump_to_file=True,
)

View File

@ -1,6 +1,5 @@
#!/usr/bin/env python3
import argparse
from pathlib import Path
from typing import Tuple
import subprocess
@ -10,9 +9,10 @@ import time
from ci_config import CI_CONFIG, BuildConfig
from ccache_utils import CargoCache
from docker_pull_helper import get_image_with_version
from env_helper import (
GITHUB_JOB_API_URL,
IMAGES_PATH,
REPO_COPY,
S3_BUILDS_BUCKET,
S3_DOWNLOAD,
@ -23,7 +23,6 @@ from pr_info import PRInfo
from report import BuildResult, FAILURE, StatusType, SUCCESS
from s3_helper import S3Helper
from tee_popen import TeePopen
import docker_images_helper
from version_helper import (
ClickHouseVersion,
get_version_from_repo,
@ -224,22 +223,11 @@ def upload_master_static_binaries(
print(f"::notice ::Binary static URL (compact): {url_compact}")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser("Clickhouse builder script")
parser.add_argument(
"build_name",
help="build name",
)
return parser.parse_args()
def main():
logging.basicConfig(level=logging.INFO)
args = parse_args()
stopwatch = Stopwatch()
build_name = args.build_name
build_name = sys.argv[1]
build_config = CI_CONFIG.build_config[build_name]
@ -262,13 +250,15 @@ def main():
(performance_pr, pr_info.sha, build_name, "performance.tar.zst")
)
# FIXME: to be removed in favor of "skip by job digest"
# If this is rerun, then we try to find already created artifacts and just
# put them as github actions artifact (result)
# The s3_path_prefix has additional "/" in the end to prevent finding
# e.g. `binary_darwin_aarch64/clickhouse` for `binary_darwin`
check_for_success_run(s3_helper, f"{s3_path_prefix}/", build_name, version)
docker_image = get_image_with_version(IMAGES_PATH, IMAGE_NAME)
image_version = docker_image.version
logging.info("Got version from repo %s", version.string)
official_flag = pr_info.number == 0
@ -291,17 +281,13 @@ def main():
)
cargo_cache.download()
docker_image = docker_images_helper.pull_image(
docker_images_helper.get_docker_image(IMAGE_NAME)
)
packager_cmd = get_packager_cmd(
build_config,
repo_path / "docker" / "packager",
build_output_path,
cargo_cache.directory,
version.string,
docker_image.version,
image_version,
official_flag,
)

View File

@ -6,7 +6,6 @@ import os
import sys
import atexit
from pathlib import Path
from typing import List
from github import Github
@ -14,8 +13,8 @@ from env_helper import (
GITHUB_JOB_URL,
GITHUB_REPOSITORY,
GITHUB_SERVER_URL,
REPORTS_PATH,
TEMP_PATH,
REPORT_PATH,
)
from report import (
BuildResult,
@ -27,7 +26,7 @@ from report import (
)
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from pr_info import NeedsDataType, PRInfo
from commit_status_helper import (
RerunHelper,
format_description,
@ -47,32 +46,32 @@ NEEDS_DATA = os.getenv("NEEDS_DATA", "")
def main():
logging.basicConfig(level=logging.INFO)
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
logging.info("Reports path %s", REPORTS_PATH)
reports_path = Path(REPORTS_PATH)
logging.info(
"Reports found:\n %s",
"\n ".join(p.as_posix() for p in reports_path.rglob("*.json")),
)
build_check_name = sys.argv[1]
needs_data: List[str] = []
needs_data = {} # type: NeedsDataType
required_builds = 0
if os.path.exists(NEEDS_DATA_PATH):
with open(NEEDS_DATA_PATH, "rb") as file_handler:
needs_data = json.load(file_handler)
if NEEDS_DATA:
needs_data = json.loads(NEEDS_DATA)
# drop non build jobs if any
needs_data = [d for d in needs_data if "Build" in d]
elif os.path.exists(NEEDS_DATA_PATH):
with open(NEEDS_DATA_PATH, "rb") as file_handler:
needs_data = list(json.load(file_handler).keys())
else:
assert False, "NEEDS_DATA env var required"
required_builds = len(needs_data)
if needs_data:
logging.info("The next builds are required: %s", ", ".join(needs_data))
if all(i["result"] == "skipped" for i in needs_data.values()):
logging.info("All builds are skipped, exiting")
sys.exit(0)
gh = Github(get_best_robot_token(), per_page=100)
pr_info = PRInfo()
@ -85,13 +84,14 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
builds_for_check = CI_CONFIG.get_builds_for_report(build_check_name)
builds_for_check = CI_CONFIG.builds_report_config[build_check_name]
required_builds = required_builds or len(builds_for_check)
# Collect reports from json artifacts
build_results = []
for build_name in builds_for_check:
build_result = BuildResult.read_json(reports_path, build_name)
report_name = BuildResult.get_report_name(build_name).stem
build_result = BuildResult.read_json(reports_path / report_name, build_name)
if build_result.is_missing:
logging.warning("Build results for %s are missing", build_name)
continue
@ -179,13 +179,7 @@ def main():
)
post_commit_status(
commit,
summary_status,
url,
description,
build_check_name,
pr_info,
dump_to_file=True,
commit, summary_status, url, description, build_check_name, pr_info
)
if summary_status == ERROR:

View File

@ -1,738 +0,0 @@
import argparse
import json
import os
import concurrent.futures
from pathlib import Path
import re
import subprocess
import sys
from typing import Any, Dict, Iterable, List, Optional
from github import Github
from s3_helper import S3Helper
from digest_helper import DockerDigester, JobDigester
import docker_images_helper
from env_helper import (
CI,
ROOT_DIR,
S3_BUILDS_BUCKET,
TEMP_PATH,
REPORT_PATH,
)
from commit_status_helper import CommitStatusData, get_commit, set_status_comment
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from ci_config import CI_CONFIG
from git_helper import Git, Runner as GitRunner, GIT_PREFIX
from report import BuildResult
from version_helper import get_version_from_repo
def get_check_name(check_name: str, batch: int, num_batches: int) -> str:
res = check_name
if num_batches > 1:
res = f"{check_name} [{batch+1}/{num_batches}]"
return res
def normalize_check_name(check_name: str) -> str:
res = check_name.lower()
for r in ((" ", "_"), ("(", "_"), (")", "_"), (",", "_"), ("/", "_")):
res = res.replace(*r)
return res
def is_build_job(job: str) -> bool:
if "package_" in job or "binary_" in job or job == "fuzzers":
return True
return False
def is_test_job(job: str) -> bool:
return not is_build_job(job) and not "Style" in job and not "Docs check" in job
def is_docs_job(job: str) -> bool:
return "Docs check" in job
def parse_args(parser: argparse.ArgumentParser) -> argparse.Namespace:
# FIXME: consider switching to sub_parser for configure, pre, run, post actions
parser.add_argument(
"--configure",
action="store_true",
help="Action that configures ci run. Calculates digests, checks job to be executed, generates json output",
)
parser.add_argument(
"--update-gh-statuses",
action="store_true",
help="Action that recreate success GH statuses for jobs that finished successfully in past and will be skipped this time",
)
parser.add_argument(
"--pre",
action="store_true",
help="Action that executes prerequesetes for the job provided in --job-name",
)
parser.add_argument(
"--run",
action="store_true",
help="Action that executes run action for specified --job-name. run_command must be configured for a given job name.",
)
parser.add_argument(
"--post",
action="store_true",
help="Action that executes post actions for the job provided in --job-name",
)
parser.add_argument(
"--mark-success",
action="store_true",
help="Action that marks job provided in --job-name (with batch provided in --batch) as successfull",
)
parser.add_argument(
"--job-name",
default="",
type=str,
help="Job name as in config",
)
parser.add_argument(
"--batch",
default=-1,
type=int,
help="Current batch number (required for --mark-success), -1 or omit for single-batch job",
)
parser.add_argument(
"--infile",
default="",
type=str,
help="Input json file or json string with ci run config",
)
parser.add_argument(
"--outfile",
default="",
type=str,
required=False,
help="otput file to write json result to, if not set - stdout",
)
parser.add_argument(
"--pretty",
action="store_true",
default=False,
help="makes json output pretty formated",
)
parser.add_argument(
"--skip-docker",
action="store_true",
default=False,
help="skip fetching docker data from dockerhub, used in --configure action (for debugging)",
)
parser.add_argument(
"--docker-digest-or-latest",
action="store_true",
default=False,
help="temporary hack to fallback to latest if image with digest as a tag is not on docker hub",
)
parser.add_argument(
"--skip-jobs",
action="store_true",
default=False,
help="skip fetching data about job runs, used in --configure action (for debugging)",
)
parser.add_argument(
"--rebuild-all-docker",
action="store_true",
default=False,
help="will create run config for rebuilding all dockers, used in --configure action (for nightly docker job)",
)
parser.add_argument(
"--rebuild-all-binaries",
action="store_true",
default=False,
help="will create run config without skipping build jobs in any case, used in --configure action (for release branches)",
)
return parser.parse_args()
def get_file_flag_name(
job_name: str, digest: str, batch: int = 0, num_batches: int = 1
) -> str:
if num_batches < 2:
return f"job_{job_name}_{digest}.ci"
else:
return f"job_{job_name}_{digest}_{batch}_{num_batches}.ci"
def get_s3_path(build_digest: str) -> str:
return f"CI_data/BUILD-{build_digest}/"
def get_s3_path_docs(digest: str) -> str:
return f"CI_data/DOCS-{digest}/"
def check_missing_images_on_dockerhub(
image_name_tag: Dict[str, str], arch: Optional[str] = None
) -> Dict[str, str]:
"""
Checks missing images on dockerhub.
Works concurrently for all given images.
Docker must be logged in.
"""
def run_docker_command(
image: str, image_digest: str, arch: Optional[str] = None
) -> Dict:
"""
aux command for fetching a single docker manifest
"""
command = [
"docker",
"manifest",
"inspect",
f"{image}:{image_digest}" if not arch else f"{image}:{image_digest}-{arch}",
]
process = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
check=False,
)
return {
"image": image,
"image_digest": image_digest,
"arch": arch,
"stdout": process.stdout,
"stderr": process.stderr,
"return_code": process.returncode,
}
result: Dict[str, str] = {}
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
executor.submit(run_docker_command, image, tag, arch)
for image, tag in image_name_tag.items()
]
responses = [
future.result() for future in concurrent.futures.as_completed(futures)
]
for resp in responses:
name, stdout, stderr, digest, arch = (
resp["image"],
resp["stdout"],
resp["stderr"],
resp["image_digest"],
resp["arch"],
)
if stderr:
if stderr.startswith("no such manifest"):
result[name] = digest
else:
print(f"Error: Unknown error: {stderr}, {name}, {arch}")
elif stdout:
if "mediaType" in stdout:
pass
else:
print(f"Error: Unknown response: {stdout}")
assert False, "FIXME"
else:
print(f"Error: No response for {name}, {digest}, {arch}")
assert False, "FIXME"
return result
def _check_and_update_for_early_style_check(run_config: dict) -> None:
"""
This is a temporary hack to start the style check before the docker build if possible
FIXME: need better solution to do style check as soon as possible and as fast as possible w/o dependency on docker job
"""
jobs_to_do = run_config.get("jobs_data", {}).get("jobs_to_do", [])
docker_to_build = run_config.get("docker_data", {}).get("missing_multi", [])
if (
"Style check" in jobs_to_do
and docker_to_build
and "clickhouse/style-test" not in docker_to_build
):
index = jobs_to_do.index("Style check")
jobs_to_do[index] = "Style check early"
def _configure_docker_jobs(
rebuild_all_dockers: bool, docker_digest_or_latest: bool = False
) -> Dict:
# generate docker jobs data
docker_digester = DockerDigester()
imagename_digest_dict = (
docker_digester.get_all_digests()
) # 'image name - digest' mapping
images_info = docker_images_helper.get_images_info()
# a. check missing images
print("Start checking missing images in dockerhub")
# FIXME: we need login as docker manifest inspect goes directly to one of the *.docker.com hosts instead of "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
# find if it's possible to use the setting of /etc/docker/daemon.json
docker_images_helper.docker_login()
if not rebuild_all_dockers:
missing_multi_dict = check_missing_images_on_dockerhub(imagename_digest_dict)
missing_multi = list(missing_multi_dict)
missing_amd64 = []
missing_aarch64 = []
if not docker_digest_or_latest:
# look for missing arm and amd images only among missing multiarch manifests @missing_multi_dict
# to avoid extra dockerhub api calls
missing_amd64 = list(
check_missing_images_on_dockerhub(missing_multi_dict, "amd64")
)
# FIXME: WA until full arm support: skip not supported arm images
missing_aarch64 = list(
check_missing_images_on_dockerhub(
{
im: digest
for im, digest in missing_multi_dict.items()
if not images_info[im]["only_amd64"]
},
"aarch64",
)
)
else:
# add all images to missing
missing_multi = list(imagename_digest_dict)
missing_amd64 = missing_multi
# FIXME: WA until full arm support: skip not supported arm images
missing_aarch64 = [
name
for name in imagename_digest_dict
if not images_info[name]["only_amd64"]
]
# FIXME: temporary hack, remove after transition to docker digest as tag
if docker_digest_or_latest:
if missing_multi:
print(
f"WARNING: Missing images {list(missing_multi)} - fallback to latest tag"
)
for image in missing_multi:
imagename_digest_dict[image] = "latest"
print("...checking missing images in dockerhub - done")
return {
"images": imagename_digest_dict,
"missing_aarch64": missing_aarch64,
"missing_amd64": missing_amd64,
"missing_multi": missing_multi,
}
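For illustration, the docker_data dict returned here has the following shape (image names and digest tags are hypothetical):
# Hypothetical example of the dict returned by _configure_docker_jobs().
docker_data = {
    "images": {"clickhouse/fasttest": "1a2b3c", "clickhouse/style-test": "4d5e6f"},
    "missing_aarch64": [],
    "missing_amd64": ["clickhouse/fasttest"],
    "missing_multi": ["clickhouse/fasttest"],
}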
def _configure_jobs(
build_digest: str,
docs_digest: str,
job_digester: JobDigester,
s3: S3Helper,
rebuild_all_binaries: bool,
pr_labels: Iterable[str],
commit_tokens: List[str],
) -> Dict:
# a. digest each item from the config
job_digester = JobDigester()
jobs_params: Dict[str, Dict] = {}
jobs_to_do: List[str] = []
jobs_to_skip: List[str] = []
digests: Dict[str, str] = {}
print("Calculating job digests - start")
for job in CI_CONFIG.job_generator():
digest = job_digester.get_job_digest(CI_CONFIG.get_digest_config(job))
digests[job] = digest
print(f" job [{job.rjust(50)}] has digest [{digest}]")
print("Calculating job digests - done")
# b. check if we have something done
path = get_s3_path(build_digest)
done_files = s3.list_prefix(path)
done_files = [file.split("/")[-1] for file in done_files]
print(f"S3 CI files for the build [{build_digest}]: {done_files}")
docs_path = get_s3_path_docs(docs_digest)
done_files_docs = s3.list_prefix(docs_path)
done_files_docs = [file.split("/")[-1] for file in done_files_docs]
print(f"S3 CI files for the docs [{docs_digest}]: {done_files_docs}")
done_files += done_files_docs
for job in digests:
digest = digests[job]
job_config = CI_CONFIG.get_job_config(job)
num_batches: int = job_config.num_batches
batches_to_do: List[int] = []
if job_config.run_by_label:
# this job is controlled by a label; add it to todo if its label is set on the PR
if job_config.run_by_label in pr_labels:
for batch in range(num_batches): # type: ignore
batches_to_do.append(batch)
else:
# this job is controlled by digest; add it to todo if it was not successfully done before
for batch in range(num_batches): # type: ignore
success_flag_name = get_file_flag_name(job, digest, batch, num_batches)
if success_flag_name not in done_files or (
rebuild_all_binaries and is_build_job(job)
):
batches_to_do.append(batch)
if batches_to_do:
jobs_to_do.append(job)
jobs_params[job] = {
"batches": batches_to_do,
"num_batches": num_batches,
}
else:
jobs_to_skip += (job,)
if commit_tokens:
requested_jobs = [
token[len("#job_") :]
for token in commit_tokens
if token.startswith("#job_")
]
assert any(
len(x) > 1 for x in requested_jobs
), f"Invalid job names requested [{requested_jobs}]"
if requested_jobs:
jobs_to_do_requested = []
for job in requested_jobs:
job_with_parents = CI_CONFIG.get_job_with_parents(job)
# always add requested job itself, even if it could be skipped
jobs_to_do_requested.append(job_with_parents[0])
for parent in job_with_parents[1:]:
if parent in jobs_to_do and parent not in jobs_to_do_requested:
jobs_to_do_requested.append(parent)
print(
f"NOTE: Only specific job(s) were requested: [{jobs_to_do_requested}]"
)
jobs_to_do = jobs_to_do_requested
return {
"digests": digests,
"jobs_to_do": jobs_to_do,
"jobs_to_skip": jobs_to_skip,
"jobs_params": jobs_params,
}
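For illustration, the jobs_data dict assembled here looks roughly like this (job names and digests are hypothetical):
# Hypothetical example of the dict returned by _configure_jobs().
jobs_data = {
    "digests": {"Style check": "f00ba4", "Stateless tests (asan)": "deadbeef"},
    "jobs_to_do": ["Style check", "Stateless tests (asan)"],
    "jobs_to_skip": ["Docs check"],
    "jobs_params": {
        "Style check": {"batches": [0], "num_batches": 1},
        "Stateless tests (asan)": {"batches": [0, 1, 2, 3], "num_batches": 4},
    },
}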
def _update_gh_statuses(indata: Dict, s3: S3Helper) -> None:
# This action is required to re-create all GH statuses for skipped jobs, so that the ci report can be generated afterwards
temp_path = Path(TEMP_PATH)
if not temp_path.exists():
temp_path.mkdir(parents=True, exist_ok=True)
# clean up before start
for file in temp_path.glob("*.ci"):
file.unlink()
# download all metadata files
path = get_s3_path(indata["build"])
files = s3.download_files( # type: ignore
bucket=S3_BUILDS_BUCKET,
s3_path=path,
file_suffix=".ci",
local_directory=temp_path,
)
print(f"CI metadata files [{files}]")
path = get_s3_path_docs(indata["docs"])
files_docs = s3.download_files( # type: ignore
bucket=S3_BUILDS_BUCKET,
s3_path=path,
file_suffix=".ci",
local_directory=temp_path,
)
print(f"CI docs metadata files [{files_docs}]")
files += files_docs
# parse CI metadata
job_digests = indata["jobs_data"]["digests"]
# create GH status
pr_info = PRInfo()
commit = get_commit(Github(get_best_robot_token(), per_page=100), pr_info.sha)
def run_create_status(job, digest, batch, num_batches):
success_flag_name = get_file_flag_name(job, digest, batch, num_batches)
if success_flag_name in files:
print(f"Going to re-create GH status for job [{job}] sha [{pr_info.sha}]")
job_status = CommitStatusData.load_from_file(
f"{TEMP_PATH}/{success_flag_name}"
) # type: CommitStatusData
assert job_status.status == "success", "BUG!"
commit.create_status(
state=job_status.status,
target_url=job_status.report_url,
description=f"Reused from [{job_status.pr_num}-{job_status.sha[0:8]}]: {job_status.description}",
context=get_check_name(job, batch=batch, num_batches=num_batches),
)
print(f"GH status re-created from file [{success_flag_name}]")
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for job in job_digests:
if is_build_job(job):
# no GH status for build jobs
continue
digest = job_digests[job]
num_batches = CI_CONFIG.get_job_config(job).num_batches
for batch in range(num_batches):
future = executor.submit(
run_create_status, job, digest, batch, num_batches
)
futures.append(future)
done, _ = concurrent.futures.wait(futures)
for future in done:
try:
_ = future.result()
except Exception as e:
raise e
print("Going to update overall CI report")
set_status_comment(commit, pr_info)
print("... CI report update - done")
# clean up
ci_files = list(temp_path.glob("*.ci"))
for file in ci_files:
file.unlink()
def _fetch_commit_tokens(message: str) -> List[str]:
pattern = r"#[\w-]+"
matches = re.findall(pattern, message)
return matches
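This is the pattern that later picks up the '#no-merge-commit' and '#job_<name>' tokens; a quick check on a made-up commit message:
# Quick check of the token pattern above; the commit message is made up.
import re

message = "Fix something\n\n#no-merge-commit #job_Style_check"
print(re.findall(r"#[\w-]+", message))
# -> ['#no-merge-commit', '#job_Style_check']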
def main() -> int:
exit_code = 0
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
args = parse_args(parser)
if args.mark_success or args.pre or args.post or args.run:
assert args.infile, "Run config must be provided via --infile"
assert args.job_name, "Job name must be provided via --job-name"
indata: Optional[Dict[str, Any]] = None
if args.infile:
indata = (
json.loads(args.infile)
if not os.path.isfile(args.infile)
else json.load(open(args.infile))
)
assert indata and isinstance(indata, dict), "Invalid --infile json"
result: Dict[str, Any] = {}
s3 = S3Helper()
if args.configure:
GR = GitRunner()
pr_info = PRInfo()
docker_data = {}
git_ref = GR.run(f"{GIT_PREFIX} rev-parse HEAD")
# if '#no-merge-commit' is set in commit message - set git ref to PR branch head to avoid merge-commit
tokens = []
if pr_info.number != 0:
message = GR.run(f"{GIT_PREFIX} log {pr_info.sha} --format=%B -n 1")
tokens = _fetch_commit_tokens(message)
print(f"Found commit message tokens: [{tokens}]")
if "#no-merge-commit" in tokens and CI:
GR.run(f"{GIT_PREFIX} checkout {pr_info.sha}")
git_ref = GR.run(f"{GIT_PREFIX} rev-parse HEAD")
print(
"#no-merge-commit is set in commit message - Setting git ref to PR branch HEAD to not use merge commit"
)
# let's get CH version
version = get_version_from_repo(git=Git(True)).string
print(f"Got CH version for this commit: [{version}]")
docker_data = (
_configure_docker_jobs(
args.rebuild_all_docker, args.docker_digest_or_latest
)
if not args.skip_docker
else {}
)
job_digester = JobDigester()
build_digest = job_digester.get_job_digest(
CI_CONFIG.get_digest_config("package_release")
)
docs_digest = job_digester.get_job_digest(
CI_CONFIG.get_digest_config("Docs check")
)
jobs_data = (
_configure_jobs(
build_digest,
docs_digest,
job_digester,
s3,
args.rebuild_all_binaries,
pr_info.labels,
tokens,
)
if not args.skip_jobs
else {}
)
# conclude results
result["git_ref"] = git_ref
result["version"] = version
result["build"] = build_digest
result["docs"] = docs_digest
result["jobs_data"] = jobs_data
result["docker_data"] = docker_data
if not args.docker_digest_or_latest:
_check_and_update_for_early_style_check(result)
elif args.update_gh_statuses:
assert indata, "Run config must be provided via --infile"
_update_gh_statuses(indata=indata, s3=s3)
elif args.pre:
# remove job status file if any
CommitStatusData.cleanup()
if is_test_job(args.job_name):
assert indata, "Run config must be provided via --infile"
report_path = Path(REPORT_PATH)
report_path.mkdir(exist_ok=True, parents=True)
path = get_s3_path(indata["build"])
files = s3.download_files( # type: ignore
bucket=S3_BUILDS_BUCKET,
s3_path=path,
file_suffix=".json",
local_directory=report_path,
)
print(
f"Pre action done. Report files [{files}] have been downloaded from [{path}] to [{report_path}]"
)
else:
print("Pre action done. Nothing to do for [{args.job_name}]")
elif args.run:
assert CI_CONFIG.get_job_config(
args.job_name
).run_command, f"Run command must be configured in CI_CONFIG for [{args.job_name}] or in GH workflow"
if CI_CONFIG.get_job_config(args.job_name).timeout:
os.environ["KILL_TIMEOUT"] = str(
CI_CONFIG.get_job_config(args.job_name).timeout
)
os.environ["CHECK_NAME"] = args.job_name
run_command = (
"./tests/ci/" + CI_CONFIG.get_job_config(args.job_name).run_command
)
if ".py" in run_command:
run_command = "python3 " + run_command
print(f"Going to start run command [{run_command}]")
process = subprocess.run(
run_command,
stdout=sys.stdout,
stderr=sys.stderr,
text=True,
check=False,
shell=True,
)
if process.returncode == 0:
print(f"Run action done for: [{args.job_name}]")
else:
print(
f"Run action failed for: [{args.job_name}] with exit code [{process.returncode}]"
)
exit_code = process.returncode
elif args.post:
if is_build_job(args.job_name):
report_path = Path(TEMP_PATH) # build-check.py stores report in TEMP_PATH
assert report_path.is_dir(), f"File [{report_path}] is not a dir"
files = list(report_path.glob(f"*{args.job_name}.json")) # type: ignore[arg-type]
assert len(files) == 1, f"Which is the report file: {files}?"
local_report = f"{files[0]}"
report_name = BuildResult.get_report_name(args.job_name)
assert indata
s3_path = Path(get_s3_path(indata["build"])) / report_name
report_url = s3.upload_file(
bucket=S3_BUILDS_BUCKET, file_path=local_report, s3_path=s3_path
)
print(
f"Post action done. Report file [{local_report}] has been uploaded to [{report_url}]"
)
else:
print(f"Post action done. Nothing to do for [{args.job_name}]")
elif args.mark_success:
assert indata, "Run config must be provided via --infile"
job = args.job_name
num_batches = CI_CONFIG.get_job_config(job).num_batches
assert (
num_batches <= 1 or 0 <= args.batch < num_batches
), f"--batch must be provided and in range [0, {num_batches}) for {job}"
# FIXME: find generic design for propagating and handling job status (e.g. stop using statuses in GH api)
# now a job can be a build job w/o status data, or any other job that exits with 0 with or w/o status data
if is_build_job(job):
# there is no status for build jobs
# create dummy success to mark it as done
job_status = CommitStatusData(
status="success", description="dummy status", report_url="dummy_url"
)
else:
if not CommitStatusData.is_present():
# apparently exited after the rerun-helper check
# do nothing, exit without failure
print(f"ERROR: no status file for job [{job}]")
job_status = CommitStatusData(
status="dummy failure",
description="dummy status",
report_url="dummy_url",
)
else:
# normal case
job_status = CommitStatusData.load_status()
# Storing job data (report_url) to restore OK GH status on job results reuse
if job_status.is_ok():
success_flag_name = get_file_flag_name(
job, indata["jobs_data"]["digests"][job], args.batch, num_batches
)
if not is_docs_job(job):
path = get_s3_path(indata["build"]) + success_flag_name
else:
path = get_s3_path_docs(indata["docs"]) + success_flag_name
job_status.dump_to_file(success_flag_name)
_ = s3.upload_file(
bucket=S3_BUILDS_BUCKET, file_path=success_flag_name, s3_path=path
)
os.remove(success_flag_name)
print(
f"Job [{job}] with digest [{indata['jobs_data']['digests'][job]}] {f'and batch {args.batch}/{num_batches}' if num_batches > 1 else ''} marked as successful. path: [{path}]"
)
else:
print(f"Job [{job}] is not ok, status [{job_status.status}]")
# print results
if args.outfile:
with open(args.outfile, "w") as f:
if isinstance(result, str):
print(result, file=f)
elif isinstance(result, dict):
print(json.dumps(result, indent=2 if args.pretty else None), file=f)
else:
raise AssertionError(f"Unexpected type for 'res': {type(result)}")
else:
if isinstance(result, str):
print(result)
elif isinstance(result, dict):
print(json.dumps(result, indent=2 if args.pretty else None))
else:
raise AssertionError(f"Unexpected type for 'res': {type(result)}")
return exit_code
if __name__ == "__main__":
os.chdir(ROOT_DIR)
sys.exit(main())

View File

@ -3,40 +3,8 @@
import logging
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Literal, Optional, Union
@dataclass
class DigestConfig:
# all files, dirs to include into digest, glob supported
include_paths: List[Union[str, Path]] = field(default_factory=list)
# file suffixes to exclude from digest
exclude_files: List[str] = field(default_factory=list)
# directories to exclude from digest
exclude_dirs: List[Union[str, Path]] = field(default_factory=list)
# docker names to include into digest
docker: List[str] = field(default_factory=list)
# git submodules digest
git_submodules: bool = False
@dataclass
class JobConfig:
"""
contains config parameters relevant for job execution in the CI workflow
@digest - configures digest calculation for the job
@run_command - will be triggered for the job if omitted in the CI workflow yml
@timeout
@num_batches - sets number of batches for multi-batch job
"""
digest: DigestConfig = DigestConfig()
run_command: str = ""
timeout: Optional[int] = None
num_batches: int = 1
run_by_label: str = ""
from dataclasses import dataclass
from typing import Callable, Dict, List, Literal, Union
@dataclass
@ -51,21 +19,6 @@ class BuildConfig:
sparse_checkout: bool = False
comment: str = ""
static_binary_name: str = ""
job_config: JobConfig = JobConfig(
digest=DigestConfig(
include_paths=[
"./src",
"./contrib/*-cmake",
"./cmake",
"./base",
"./programs",
"./packages",
],
exclude_files=[".md"],
docker=["clickhouse/binary-builder"],
git_submodules=True,
),
)
def export_env(self, export: bool = False) -> str:
def process(field_name: str, field: Union[bool, str]) -> str:
@ -78,292 +31,29 @@ class BuildConfig:
return "\n".join(process(k, v) for k, v in self.__dict__.items())
@dataclass
class BuildReportConfig:
builds: List[str]
job_config: JobConfig = JobConfig()
@dataclass
class TestConfig:
required_build: str
force_tests: bool = False
job_config: JobConfig = JobConfig()
BuildConfigs = Dict[str, BuildConfig]
BuildsReportConfig = Dict[str, BuildReportConfig]
BuildsReportConfig = Dict[str, List[str]]
TestConfigs = Dict[str, TestConfig]
# common digests configs
compatibility_check_digest = DigestConfig(
include_paths=["./tests/ci/compatibility_check.py"],
docker=["clickhouse/test-old-ubuntu", "clickhouse/test-old-centos"],
)
install_check_digest = DigestConfig(
include_paths=["./tests/ci/install_check.py"],
docker=["clickhouse/install-deb-test", "clickhouse/install-rpm-test"],
)
statless_check_digest = DigestConfig(
include_paths=["./tests/queries/0_stateless/"],
exclude_files=[".md"],
docker=["clickhouse/stateless-test"],
)
stateful_check_digest = DigestConfig(
include_paths=["./tests/queries/1_stateful/"],
exclude_files=[".md"],
docker=["clickhouse/stateful-test"],
)
# FIXME: which tests are stresstest? stateless?
stress_check_digest = DigestConfig(
include_paths=["./tests/queries/0_stateless/"],
exclude_files=[".md"],
docker=["clickhouse/stress-test"],
)
# FIXME: which tests are upgrade? just python?
upgrade_check_digest = DigestConfig(
include_paths=["./tests/ci/upgrade_check.py"],
exclude_files=[".md"],
docker=["clickhouse/upgrade-check"],
)
integration_check_digest = DigestConfig(
include_paths=["./tests/ci/integration_test_check.py", "./tests/integration"],
exclude_files=[".md"],
docker=[
"clickhouse/dotnet-client",
"clickhouse/integration-helper",
"clickhouse/integration-test",
"clickhouse/integration-tests-runner",
"clickhouse/kerberized-hadoop",
"clickhouse/kerberos-kdc",
"clickhouse/mysql-golang-client",
"clickhouse/mysql-java-client",
"clickhouse/mysql-js-client",
"clickhouse/mysql-php-client",
"clickhouse/nginx-dav",
"clickhouse/postgresql-java-client",
],
)
# FIXME: which tests are AST_FUZZER_TEST? just python?
# FIXME: should ast fuzzer test be non-skipable?
ast_fuzzer_check_digest = DigestConfig(
include_paths=["./tests/ci/ast_fuzzer_check.py"],
exclude_files=[".md"],
docker=["clickhouse/fuzzer"],
)
unit_check_digest = DigestConfig(
include_paths=["./tests/ci/unit_tests_check.py"],
exclude_files=[".md"],
docker=["clickhouse/unit-test"],
)
perf_check_digest = DigestConfig(
include_paths=[
"./tests/ci/performance_comparison_check.py",
"./tests/performance/",
],
exclude_files=[".md"],
docker=["clickhouse/performance-comparison"],
)
sqllancer_check_digest = DigestConfig(
include_paths=["./tests/ci/sqlancer_check.py"],
exclude_files=[".md"],
docker=["clickhouse/sqlancer-test"],
)
sqllogic_check_digest = DigestConfig(
include_paths=["./tests/ci/sqllogic_test.py"],
exclude_files=[".md"],
docker=["clickhouse/sqllogic-test"],
)
sqltest_check_digest = DigestConfig(
include_paths=["./tests/ci/sqltest.py"],
exclude_files=[".md"],
docker=["clickhouse/sqltest"],
)
bugfix_validate_check = DigestConfig(
include_paths=[
"./tests/queries/0_stateless/",
"./tests/ci/integration_test_check.py",
"./tests/ci/functional_test_check.py",
"./tests/ci/bugfix_validate_check.py",
],
exclude_files=[".md"],
docker=[
"clickhouse/stateless-test",
"clickhouse/dotnet-client",
"clickhouse/integration-helper",
"clickhouse/integration-test",
"clickhouse/integration-tests-runner",
"clickhouse/kerberized-hadoop",
"clickhouse/kerberos-kdc",
"clickhouse/mysql-golang-client",
"clickhouse/mysql-java-client",
"clickhouse/mysql-js-client",
"clickhouse/mysql-php-client",
"clickhouse/nginx-dav",
"clickhouse/postgresql-java-client",
],
)
# common test params
statless_test_common_params = {
"digest": statless_check_digest,
"run_command": 'functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT',
"timeout": 10800,
}
stateful_test_common_params = {
"digest": stateful_check_digest,
"run_command": 'functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT',
"timeout": 3600,
}
stress_test_common_params = {
"digest": stress_check_digest,
"run_command": "stress_check.py",
}
upgrade_test_common_params = {
"digest": upgrade_check_digest,
"run_command": "upgrade_check.py",
}
astfuzzer_test_common_params = {
"digest": ast_fuzzer_check_digest,
"run_command": "ast_fuzzer_check.py",
}
integration_test_common_params = {
"digest": integration_check_digest,
"run_command": 'integration_test_check.py "$CHECK_NAME"',
}
unit_test_common_params = {
"digest": unit_check_digest,
"run_command": "unit_tests_check.py",
}
perf_test_common_params = {
"digest": perf_check_digest,
"run_command": "performance_comparison_check.py",
}
sqllancer_test_common_params = {
"digest": sqllancer_check_digest,
"run_command": "sqlancer_check.py",
}
sqllogic_test_params = {
"digest": sqllogic_check_digest,
"run_command": "sqllogic_test.py",
"timeout": 10800,
}
sql_test_params = {
"digest": sqltest_check_digest,
"run_command": "sqltest.py",
"timeout": 10800,
}
@dataclass
class CiConfig:
"""
Contains configs for ALL jobs in CI pipeline
each config item in the below dicts should be an instance of the JobConfig class or a subclass of it
"""
build_config: BuildConfigs
builds_report_config: BuildsReportConfig
test_configs: TestConfigs
other_jobs_configs: TestConfigs
def get_job_config(self, check_name: str) -> JobConfig:
res = None
for config in (
self.build_config,
self.builds_report_config,
self.test_configs,
self.other_jobs_configs,
):
if check_name in config: # type: ignore
res = config[check_name].job_config # type: ignore
break
assert (
res is not None
), f"Invalid check_name or CI_CONFIG outdated, config not found for [{check_name}]"
return res # type: ignore
def get_job_with_parents(self, check_name: str) -> List[str]:
def _normalize_string(input_string: str) -> str:
lowercase_string = input_string.lower()
normalized_string = (
lowercase_string.replace(" ", "_")
.replace("-", "_")
.replace("(", "")
.replace(")", "")
.replace(",", "")
)
return normalized_string
res = []
check_name = _normalize_string(check_name)
for config in (
self.build_config,
self.builds_report_config,
self.test_configs,
self.other_jobs_configs,
):
for job_name in config: # type: ignore
if check_name == _normalize_string(job_name):
res.append(job_name)
if isinstance(config[job_name], TestConfig): # type: ignore
assert config[
job_name
].required_build, f"Error: Experimantal feature... Not supported job [{job_name}]" # type: ignore
res.append(config[job_name].required_build) # type: ignore
res.append("Fast tests")
res.append("Style check")
elif isinstance(config[job_name], BuildConfig): # type: ignore
res.append("Fast tests")
res.append("Style check")
else:
assert (
False
), f"check commit message tags or FIXME: request for job [{check_name}] not yet supported"
break
assert (
res
), f"Error: Experimantal feature... Invlid request or not supported job [{check_name}]"
return res
def get_digest_config(self, check_name: str) -> DigestConfig:
res = None
for config in (
self.other_jobs_configs,
self.build_config,
self.builds_report_config,
self.test_configs,
):
if check_name in config: # type: ignore
res = config[check_name].job_config.digest # type: ignore
assert (
res
), f"Invalid check_name or CI_CONFIG outdated, config not found for [{check_name}]"
return res # type: ignore
def job_generator(self) -> Iterable[str]:
"""
traverses all check names in CI pipeline
"""
for config in (
self.other_jobs_configs,
self.build_config,
self.builds_report_config,
self.test_configs,
):
for check_name in config: # type: ignore
yield check_name
def get_builds_for_report(self, report_name: str) -> List[str]:
return self.builds_report_config[report_name].builds
def validate(self) -> None:
errors = []
for name, build_config in self.build_config.items():
build_in_reports = False
for _, report_config in self.builds_report_config.items():
if name in report_config.builds:
for report_config in self.builds_report_config.values():
if name in report_config:
build_in_reports = True
break
# All build configs must belong to build_report_config
@ -381,8 +71,7 @@ class CiConfig:
f"Build name {name} does not match 'name' value '{build_config.name}'"
)
# All build_report_config values should be in build_config.keys()
for build_report_name, build_report_config in self.builds_report_config.items():
build_names = build_report_config.builds
for build_report_name, build_names in self.builds_report_config.items():
missed_names = [
name for name in build_names if name not in self.build_config.keys()
]
@ -545,301 +234,104 @@ CI_CONFIG = CiConfig(
),
},
builds_report_config={
"ClickHouse build check": BuildReportConfig(
builds=[
"package_release",
"package_aarch64",
"package_asan",
"package_ubsan",
"package_tsan",
"package_msan",
"package_debug",
"binary_release",
"fuzzers",
]
),
"ClickHouse special build check": BuildReportConfig(
builds=[
"binary_tidy",
"binary_darwin",
"binary_aarch64",
"binary_aarch64_v80compat",
"binary_freebsd",
"binary_darwin_aarch64",
"binary_ppc64le",
"binary_riscv64",
"binary_s390x",
"binary_amd64_compat",
"binary_amd64_musl",
]
),
},
other_jobs_configs={
"Docker server and keeper images": TestConfig(
"",
job_config=JobConfig(
digest=DigestConfig(
include_paths=[
"tests/ci/docker_server.py",
"./docker/server",
"./docker/keeper",
]
)
),
),
"Docs check": TestConfig(
"",
job_config=JobConfig(
digest=DigestConfig(
include_paths=["**/*.md", "./docs", "tests/ci/docs_check.py"],
docker=["clickhouse/docs-builder"],
),
),
),
"Fast tests": TestConfig(
"",
job_config=JobConfig(
digest=DigestConfig(
include_paths=["./tests/queries/0_stateless/"],
exclude_files=[".md"],
docker=["clickhouse/fasttest"],
)
),
),
"Style check": TestConfig(
"",
job_config=JobConfig(
digest=DigestConfig(
include_paths=["."], exclude_dirs=[".git", "__pycache__"]
)
),
),
"tests bugfix validate check": TestConfig(
"",
# we run this check by label - no digest required
job_config=JobConfig(run_by_label="pr-bugfix"),
),
"ClickHouse build check": [
"package_release",
"package_aarch64",
"package_asan",
"package_ubsan",
"package_tsan",
"package_msan",
"package_debug",
"binary_release",
"fuzzers",
],
"ClickHouse special build check": [
"binary_tidy",
"binary_darwin",
"binary_aarch64",
"binary_aarch64_v80compat",
"binary_freebsd",
"binary_darwin_aarch64",
"binary_ppc64le",
"binary_riscv64",
"binary_s390x",
"binary_amd64_compat",
"binary_amd64_musl",
],
},
test_configs={
"Install packages (amd64)": TestConfig(
"package_release", job_config=JobConfig(digest=install_check_digest)
),
"Install packages (arm64)": TestConfig(
"package_aarch64", job_config=JobConfig(digest=install_check_digest)
),
"Stateful tests (asan)": TestConfig(
"package_asan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (tsan)": TestConfig(
"package_tsan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (msan)": TestConfig(
"package_msan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (ubsan)": TestConfig(
"package_ubsan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (debug)": TestConfig(
"package_debug", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (release)": TestConfig(
"package_release", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (aarch64)": TestConfig(
"package_aarch64", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (release, DatabaseOrdinary)": TestConfig(
"package_release", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
# "Stateful tests (release, DatabaseReplicated)": TestConfig(
# "package_release", job_config=JobConfig(**stateful_test_common_params) # type: ignore
# ),
"Install packages (amd64)": TestConfig("package_release"),
"Install packages (arm64)": TestConfig("package_aarch64"),
"Stateful tests (asan)": TestConfig("package_asan"),
"Stateful tests (tsan)": TestConfig("package_tsan"),
"Stateful tests (msan)": TestConfig("package_msan"),
"Stateful tests (ubsan)": TestConfig("package_ubsan"),
"Stateful tests (debug)": TestConfig("package_debug"),
"Stateful tests (release)": TestConfig("package_release"),
"Stateful tests (aarch64)": TestConfig("package_aarch64"),
"Stateful tests (release, DatabaseOrdinary)": TestConfig("package_release"),
"Stateful tests (release, DatabaseReplicated)": TestConfig("package_release"),
# Stateful tests for parallel replicas
"Stateful tests (release, ParallelReplicas)": TestConfig(
"package_release", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (debug, ParallelReplicas)": TestConfig(
"package_debug", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (asan, ParallelReplicas)": TestConfig(
"package_asan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (msan, ParallelReplicas)": TestConfig(
"package_msan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (ubsan, ParallelReplicas)": TestConfig(
"package_ubsan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (tsan, ParallelReplicas)": TestConfig(
"package_tsan", job_config=JobConfig(**stateful_test_common_params) # type: ignore
),
"Stateful tests (release, ParallelReplicas)": TestConfig("package_release"),
"Stateful tests (debug, ParallelReplicas)": TestConfig("package_debug"),
"Stateful tests (asan, ParallelReplicas)": TestConfig("package_asan"),
"Stateful tests (msan, ParallelReplicas)": TestConfig("package_msan"),
"Stateful tests (ubsan, ParallelReplicas)": TestConfig("package_ubsan"),
"Stateful tests (tsan, ParallelReplicas)": TestConfig("package_tsan"),
# End stateful tests for parallel replicas
"Stateless tests (asan)": TestConfig(
"package_asan",
job_config=JobConfig(num_batches=4, **statless_test_common_params), # type: ignore
),
"Stateless tests (tsan)": TestConfig(
"package_tsan",
job_config=JobConfig(num_batches=5, **statless_test_common_params), # type: ignore
),
"Stateless tests (msan)": TestConfig(
"package_msan",
job_config=JobConfig(num_batches=6, **statless_test_common_params), # type: ignore
),
"Stateless tests (ubsan)": TestConfig(
"package_ubsan",
job_config=JobConfig(num_batches=2, **statless_test_common_params), # type: ignore
),
"Stateless tests (debug)": TestConfig(
"package_debug",
job_config=JobConfig(num_batches=5, **statless_test_common_params), # type: ignore
),
"Stateless tests (release)": TestConfig(
"package_release", job_config=JobConfig(**statless_test_common_params) # type: ignore
),
"Stateless tests (aarch64)": TestConfig(
"package_aarch64", job_config=JobConfig(**statless_test_common_params) # type: ignore
),
"Stateless tests (release, analyzer)": TestConfig(
"package_release", job_config=JobConfig(**statless_test_common_params) # type: ignore
),
"Stateless tests (release, DatabaseOrdinary)": TestConfig(
"package_release", job_config=JobConfig(**statless_test_common_params) # type: ignore
),
"Stateless tests (release, DatabaseReplicated)": TestConfig(
"package_release",
job_config=JobConfig(num_batches=4, **statless_test_common_params), # type: ignore
),
"Stateless tests (release, s3 storage)": TestConfig(
"package_release",
job_config=JobConfig(num_batches=2, **statless_test_common_params), # type: ignore
),
"Stateless tests (debug, s3 storage)": TestConfig(
"package_debug",
job_config=JobConfig(num_batches=6, **statless_test_common_params), # type: ignore
),
"Stateless tests (tsan, s3 storage)": TestConfig(
"package_tsan",
job_config=JobConfig(num_batches=5, **statless_test_common_params), # type: ignore
),
"Stress test (asan)": TestConfig(
"package_asan", job_config=JobConfig(**stress_test_common_params) # type: ignore
),
"Stress test (tsan)": TestConfig(
"package_tsan", job_config=JobConfig(**stress_test_common_params) # type: ignore
),
"Stress test (ubsan)": TestConfig(
"package_ubsan", job_config=JobConfig(**stress_test_common_params) # type: ignore
),
"Stress test (msan)": TestConfig(
"package_msan", job_config=JobConfig(**stress_test_common_params) # type: ignore
),
"Stress test (debug)": TestConfig(
"package_debug", job_config=JobConfig(**stress_test_common_params) # type: ignore
),
"Upgrade check (asan)": TestConfig(
"package_asan", job_config=JobConfig(**upgrade_test_common_params) # type: ignore
),
"Upgrade check (tsan)": TestConfig(
"package_tsan", job_config=JobConfig(**upgrade_test_common_params) # type: ignore
),
"Upgrade check (msan)": TestConfig(
"package_msan", job_config=JobConfig(**upgrade_test_common_params) # type: ignore
),
"Upgrade check (debug)": TestConfig(
"package_debug", job_config=JobConfig(**upgrade_test_common_params) # type: ignore
),
"Integration tests (asan)": TestConfig(
"package_asan",
job_config=JobConfig(num_batches=4, **integration_test_common_params), # type: ignore
),
"Integration tests (asan, analyzer)": TestConfig(
"package_asan",
job_config=JobConfig(num_batches=6, **integration_test_common_params), # type: ignore
),
"Integration tests (tsan)": TestConfig(
"package_tsan",
job_config=JobConfig(num_batches=6, **integration_test_common_params), # type: ignore
),
# FIXME: currently no wf has this job. Try to enable
# "Integration tests (msan)": TestConfig("package_msan", job_config=JobConfig(num_batches=6, **integration_test_common_params) # type: ignore
# ),
"Integration tests (release)": TestConfig(
"package_release",
job_config=JobConfig(num_batches=4, **integration_test_common_params), # type: ignore
),
"Integration tests flaky check (asan)": TestConfig(
"package_asan", job_config=JobConfig(**integration_test_common_params) # type: ignore
),
"Compatibility check (amd64)": TestConfig(
"package_release", job_config=JobConfig(digest=compatibility_check_digest)
),
"Compatibility check (aarch64)": TestConfig(
"package_aarch64", job_config=JobConfig(digest=compatibility_check_digest)
),
"Unit tests (release)": TestConfig(
"binary_release", job_config=JobConfig(**unit_test_common_params) # type: ignore
),
"Unit tests (asan)": TestConfig(
"package_asan", job_config=JobConfig(**unit_test_common_params) # type: ignore
),
"Unit tests (msan)": TestConfig(
"package_msan", job_config=JobConfig(**unit_test_common_params) # type: ignore
),
"Unit tests (tsan)": TestConfig(
"package_tsan", job_config=JobConfig(**unit_test_common_params) # type: ignore
),
"Unit tests (ubsan)": TestConfig(
"package_ubsan", job_config=JobConfig(**unit_test_common_params) # type: ignore
),
"AST fuzzer (debug)": TestConfig(
"package_debug", job_config=JobConfig(**astfuzzer_test_common_params) # type: ignore
),
"AST fuzzer (asan)": TestConfig(
"package_asan", job_config=JobConfig(**astfuzzer_test_common_params) # type: ignore
),
"AST fuzzer (msan)": TestConfig(
"package_msan", job_config=JobConfig(**astfuzzer_test_common_params) # type: ignore
),
"AST fuzzer (tsan)": TestConfig(
"package_tsan", job_config=JobConfig(**astfuzzer_test_common_params) # type: ignore
),
"AST fuzzer (ubsan)": TestConfig(
"package_ubsan", job_config=JobConfig(**astfuzzer_test_common_params) # type: ignore
),
"Stateless tests flaky check (asan)": TestConfig(
# replaced with a non-default package
"package_asan",
job_config=JobConfig(**{**statless_test_common_params, "timeout": 3600}), # type: ignore
),
# FIXME: add digest and params
"Stateless tests (asan)": TestConfig("package_asan"),
"Stateless tests (tsan)": TestConfig("package_tsan"),
"Stateless tests (msan)": TestConfig("package_msan"),
"Stateless tests (ubsan)": TestConfig("package_ubsan"),
"Stateless tests (debug)": TestConfig("package_debug"),
"Stateless tests (release)": TestConfig("package_release"),
"Stateless tests (aarch64)": TestConfig("package_aarch64"),
"Stateless tests (release, wide parts enabled)": TestConfig("package_release"),
"Stateless tests (release, analyzer)": TestConfig("package_release"),
"Stateless tests (release, DatabaseOrdinary)": TestConfig("package_release"),
"Stateless tests (release, DatabaseReplicated)": TestConfig("package_release"),
"Stateless tests (release, s3 storage)": TestConfig("package_release"),
"Stateless tests (debug, s3 storage)": TestConfig("package_debug"),
"Stateless tests (tsan, s3 storage)": TestConfig("package_tsan"),
"Stress test (asan)": TestConfig("package_asan"),
"Stress test (tsan)": TestConfig("package_tsan"),
"Stress test (ubsan)": TestConfig("package_ubsan"),
"Stress test (msan)": TestConfig("package_msan"),
"Stress test (debug)": TestConfig("package_debug"),
"Upgrade check (asan)": TestConfig("package_asan"),
"Upgrade check (tsan)": TestConfig("package_tsan"),
"Upgrade check (msan)": TestConfig("package_msan"),
"Upgrade check (debug)": TestConfig("package_debug"),
"Integration tests (asan)": TestConfig("package_asan"),
"Integration tests (asan, analyzer)": TestConfig("package_asan"),
"Integration tests (tsan)": TestConfig("package_tsan"),
"Integration tests (release)": TestConfig("package_release"),
"Integration tests (msan)": TestConfig("package_msan"),
"Integration tests flaky check (asan)": TestConfig("package_asan"),
"Compatibility check (amd64)": TestConfig("package_release"),
"Compatibility check (aarch64)": TestConfig("package_aarch64"),
"Unit tests (release)": TestConfig("binary_release"),
"Unit tests (asan)": TestConfig("package_asan"),
"Unit tests (msan)": TestConfig("package_msan"),
"Unit tests (tsan)": TestConfig("package_tsan"),
"Unit tests (ubsan)": TestConfig("package_ubsan"),
"AST fuzzer (debug)": TestConfig("package_debug"),
"AST fuzzer (asan)": TestConfig("package_asan"),
"AST fuzzer (msan)": TestConfig("package_msan"),
"AST fuzzer (tsan)": TestConfig("package_tsan"),
"AST fuzzer (ubsan)": TestConfig("package_ubsan"),
"Stateless tests flaky check (asan)": TestConfig("package_asan"),
"ClickHouse Keeper Jepsen": TestConfig("binary_release"),
# FIXME: add digest and params
"ClickHouse Server Jepsen": TestConfig("binary_release"),
"Performance Comparison": TestConfig(
"package_release",
job_config=JobConfig(num_batches=4, **perf_test_common_params), # type: ignore
),
"Performance Comparison Aarch64": TestConfig(
"package_aarch64",
job_config=JobConfig(num_batches=4, run_by_label="pr-performance", **perf_test_common_params), # type: ignore
),
"SQLancer (release)": TestConfig(
"package_release", job_config=JobConfig(**sqllancer_test_common_params) # type: ignore
),
"SQLancer (debug)": TestConfig(
"package_debug", job_config=JobConfig(**sqllancer_test_common_params) # type: ignore
),
"Sqllogic test (release)": TestConfig(
"package_release", job_config=JobConfig(**sqllogic_test_params) # type: ignore
),
"SQLTest": TestConfig(
"package_release", job_config=JobConfig(**sql_test_params) # type: ignore
),
"Performance Comparison": TestConfig("package_release"),
"Performance Comparison Aarch64": TestConfig("package_aarch64"),
"SQLancer (release)": TestConfig("package_release"),
"SQLancer (debug)": TestConfig("package_debug"),
"Sqllogic test (release)": TestConfig("package_release"),
"SQLTest": TestConfig("package_release"),
"ClickBench (amd64)": TestConfig("package_release"),
"ClickBench (aarch64)": TestConfig("package_aarch64"),
# FIXME: add digest and params
"libFuzzer tests": TestConfig("fuzzers"), # type: ignore
"libFuzzer tests": TestConfig("fuzzers"),
},
)
CI_CONFIG.validate()
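
For orientation, a minimal hypothetical sketch of how a job could resolve the build it depends on from the mapping above; the attribute name required_build is an assumption for illustration, not something shown in this diff.

# hypothetical helper: resolve which build artifact a test job consumes,
# assuming TestConfig stores the build name in a field called required_build
def required_build_for(check_name: str) -> str:
    return CI_CONFIG.test_configs[check_name].required_build

# e.g. required_build_for("Stateless tests (asan)") -> "package_asan"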

View File

@ -25,8 +25,8 @@ from commit_status_helper import (
post_commit_status,
update_mergeable_check,
)
from docker_images_helper import get_docker_image, pull_image, DockerImage
from env_helper import TEMP_PATH, REPORT_PATH
from docker_pull_helper import DockerImage, get_image_with_version
from env_helper import TEMP_PATH, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import FORCE_TESTS_LABEL, PRInfo
from s3_helper import S3Helper
@ -123,7 +123,7 @@ def main():
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORT_PATH)
reports_path = Path(REPORTS_PATH)
args = parse_args()
check_name = args.check_name
@ -141,7 +141,7 @@ def main():
sys.exit(0)
image_name = get_image_name()
docker_image = pull_image(get_docker_image(image_name))
docker_image = get_image_with_version(reports_path, image_name)
packages_path = temp_path / "packages"
packages_path.mkdir(parents=True, exist_ok=True)
@ -205,9 +205,7 @@ def main():
)
print(f"::notice:: {check_name} Report url: {report_url}")
post_commit_status(
commit, state, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, state, report_url, description, check_name, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -1,25 +1,23 @@
#!/usr/bin/env python3
from collections import defaultdict
import json
from pathlib import Path
from typing import Dict, List, Optional, Union
import csv
import logging
import time
from dataclasses import asdict, dataclass
from github import Github
from github.Commit import Commit
from github.CommitStatus import CommitStatus
from github.GithubException import GithubException
from github.GithubObject import NotSet
from github.GithubObject import _NotSetType, NotSet as NotSet
from github.IssueComment import IssueComment
from github.PullRequest import PullRequest
from github.Repository import Repository
from ci_config import CI_CONFIG, REQUIRED_CHECKS, CHECK_DESCRIPTIONS, CheckDescription
from env_helper import GITHUB_REPOSITORY, GITHUB_RUN_URL, TEMP_PATH
from env_helper import GITHUB_REPOSITORY, GITHUB_RUN_URL
from pr_info import PRInfo, SKIP_MERGEABLE_CHECK_LABEL
from report import (
ERROR,
@ -39,7 +37,6 @@ CommitStatuses = List[CommitStatus]
MERGEABLE_NAME = "Mergeable Check"
GH_REPO = None # type: Optional[Repository]
CI_STATUS_NAME = "CI running"
STATUS_FILE_PATH = Path(TEMP_PATH) / "status.json"
class RerunHelper:
@ -95,11 +92,10 @@ def get_commit(gh: Github, commit_sha: str, retry_count: int = RETRY) -> Commit:
def post_commit_status(
commit: Commit,
state: str,
report_url: Optional[str] = None,
description: Optional[str] = None,
check_name: Optional[str] = None,
report_url: Union[_NotSetType, str] = NotSet,
description: Union[_NotSetType, str] = NotSet,
check_name: Union[_NotSetType, str] = NotSet,
pr_info: Optional[PRInfo] = None,
dump_to_file: bool = False,
) -> None:
"""The parameters are given in the same order as for commit.create_status,
if an optional parameter `pr_info` is given, the `set_status_comment` functions
@ -108,9 +104,9 @@ def post_commit_status(
try:
commit.create_status(
state=state,
target_url=report_url if report_url is not None else NotSet,
description=description if description is not None else NotSet,
context=check_name if check_name is not None else NotSet,
target_url=report_url,
description=description,
context=check_name,
)
break
except Exception as ex:
@ -133,15 +129,6 @@ def post_commit_status(
if not status_updated:
logging.error("Failed to update the status comment, continue anyway")
if dump_to_file:
assert pr_info
CommitStatusData(
status=state,
description=description or "",
report_url=report_url or "",
sha=pr_info.sha,
pr_num=pr_info.number,
).dump_status()
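
A short, hedged illustration of how the two post_commit_status signatures above differ at a call site; commit, report_url and pr_info are assumed to exist as in the callers shown elsewhere in this diff.

# reverted (NotSet-based) signature: omitted arguments fall back to GitHub's NotSet
post_commit_status(commit, "success", report_url, "all checks passed", "Fast tests", pr_info)

# removed (Optional-based) signature: the same call, but the status can also be
# persisted to STATUS_FILE_PATH for later jobs
post_commit_status(
    commit, "success", report_url, "all checks passed", "Fast tests", pr_info,
    dump_to_file=True,
)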
STATUS_ICON_MAP = defaultdict(
@ -322,55 +309,6 @@ def post_commit_status_to_file(
out.writerow([state, report_url, description])
@dataclass
class CommitStatusData:
"""
if you are about to add/remove fields in this class, be cautious: it dumps/loads to/from files (see its methods)
- you might want to add default values for new fields so that it won't break with old files
"""
status: str
report_url: str
description: str
sha: str = "deadbeaf"
pr_num: int = -1
@classmethod
def _filter_dict(cls, data: dict) -> Dict:
return {k: v for k, v in data.items() if k in cls.__annotations__.keys()}
@classmethod
def load_from_file(cls, file_path: Union[Path, str]): # type: ignore
res = {}
with open(file_path, "r") as json_file:
res = json.load(json_file)
return CommitStatusData(**cls._filter_dict(res))
@classmethod
def load_status(cls): # type: ignore
return cls.load_from_file(STATUS_FILE_PATH)
@classmethod
def is_present(cls) -> bool:
return STATUS_FILE_PATH.is_file()
def dump_status(self) -> None:
STATUS_FILE_PATH.parent.mkdir(parents=True, exist_ok=True)
self.dump_to_file(STATUS_FILE_PATH)
def dump_to_file(self, file_path: Union[Path, str]) -> None:
file_path = Path(file_path) or STATUS_FILE_PATH
with open(file_path, "w") as json_file:
json.dump(asdict(self), json_file)
def is_ok(self):
return self.status == SUCCESS
@staticmethod
def cleanup():
STATUS_FILE_PATH.unlink(missing_ok=True)
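
A hedged sketch of the round trip the removed CommitStatusData supports; pr_info is assumed to be a PRInfo instance and SUCCESS comes from report, as in the imports above.

# dump the current check status to STATUS_FILE_PATH ...
CommitStatusData(
    status=SUCCESS,
    report_url="https://example.invalid/report",   # placeholder URL
    description="all checks passed",
    sha=pr_info.sha,
    pr_num=pr_info.number,
).dump_status()

# ... and read it back in a later step of the same job
if CommitStatusData.is_present():
    restored = CommitStatusData.load_status()
    print(restored.is_ok())  # True, because status == SUCCESS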
def get_commit_filtered_statuses(commit: Commit) -> CommitStatuses:
"""
Squash statuses to latest state

View File

@ -16,8 +16,8 @@ from clickhouse_helper import (
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import RerunHelper, get_commit, post_commit_status
from docker_images_helper import DockerImage, get_docker_image, pull_image
from env_helper import TEMP_PATH, REPORT_PATH
from docker_pull_helper import get_images_with_versions, DockerImage
from env_helper import TEMP_PATH, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults, TestResult
@ -145,9 +145,8 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
pr_info = PRInfo()
@ -188,14 +187,15 @@ def main():
run_commands.extend(check_glibc_commands)
if args.check_distributions:
centos_image = pull_image(get_docker_image(IMAGE_CENTOS))
ubuntu_image = pull_image(get_docker_image(IMAGE_UBUNTU))
docker_images = get_images_with_versions(
reports_path, [IMAGE_CENTOS, IMAGE_UBUNTU]
)
check_distributions_commands = get_run_commands_distributions(
packages_path,
result_path,
server_log_path,
centos_image,
ubuntu_image,
docker_images[0],
docker_images[1],
)
run_commands.extend(check_distributions_commands)
@ -239,15 +239,7 @@ def main():
args.check_name,
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit,
state,
report_url,
description,
args.check_name,
pr_info,
dump_to_file=True,
)
post_commit_status(commit, state, report_url, description, args.check_name, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -1,20 +1,11 @@
#!/usr/bin/env python3
import bisect
from dataclasses import asdict
from hashlib import md5
from logging import getLogger
from pathlib import Path
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Union
from typing import TYPE_CHECKING, Iterable, Optional
from sys import modules
from docker_images_helper import get_images_info
from ci_config import DigestConfig
from git_helper import Runner
DOCKER_DIGEST_LEN = 12
JOB_DIGEST_LEN = 10
if TYPE_CHECKING:
from hashlib import ( # pylint:disable=no-name-in-module,ungrouped-imports
_Hash as HASH,
@ -32,55 +23,46 @@ def _digest_file(file: Path, hash_object: HASH) -> None:
hash_object.update(chunk)
def digest_path(
path: Union[Path, str],
hash_object: Optional[HASH] = None,
exclude_files: Optional[Iterable[str]] = None,
exclude_dirs: Optional[Iterable[Union[Path, str]]] = None,
) -> HASH:
def _digest_directory(directory: Path, hash_object: HASH) -> None:
assert directory.is_dir()
for p in sorted(directory.rglob("*")):
if p.is_symlink() and p.is_dir():
# The symlink directory is not listed recursively, so we process it manually
(_digest_directory(p, hash_object))
if p.is_file():
(_digest_file(p, hash_object))
def digest_path(path: Path, hash_object: Optional[HASH] = None) -> HASH:
"""Calculates md5 (or updates existing hash_object) hash of the path, either it's
directory or file
@exclude_files - file extension(s) or any filename suffix(es) that you want to exclude from digest
@exclude_dirs - dir names that you want to exclude from digest
"""
path = Path(path)
directory or file"""
hash_object = hash_object or md5()
if path.is_file():
if not exclude_files or not any(path.name.endswith(x) for x in exclude_files):
_digest_file(path, hash_object)
elif path.is_dir():
if not exclude_dirs or not any(path.name == x for x in exclude_dirs):
for p in sorted(path.iterdir()):
digest_path(p, hash_object, exclude_files, exclude_dirs)
else:
pass # broken symlink
if path.is_dir():
_digest_directory(path, hash_object)
elif path.is_file():
_digest_file(path, hash_object)
return hash_object
def digest_paths(
paths: Iterable[Union[Path, str]],
hash_object: Optional[HASH] = None,
exclude_files: Optional[Iterable[str]] = None,
exclude_dirs: Optional[Iterable[Union[Path, str]]] = None,
) -> HASH:
def digest_paths(paths: Iterable[Path], hash_object: Optional[HASH] = None) -> HASH:
"""Calculates aggregated md5 (or updates existing hash_object) hash of passed paths.
The order is processed as given"""
hash_object = hash_object or md5()
paths_all: List[Path] = []
for p in paths:
if isinstance(p, str) and "*" in p:
for path in Path(".").glob(p):
bisect.insort(paths_all, path.absolute()) # type: ignore[misc]
else:
bisect.insort(paths_all, Path(p).absolute()) # type: ignore[misc]
for path in paths_all: # type: ignore
for path in paths:
if path.exists():
digest_path(path, hash_object, exclude_files, exclude_dirs)
else:
raise AssertionError(f"Invalid path: {path}")
digest_path(path, hash_object)
return hash_object
def digest_consistent_paths(
paths: Iterable[Path], hash_object: Optional[HASH] = None
) -> HASH:
"""Calculates aggregated md5 (or updates existing hash_object) hash of passed paths.
The order doesn't matter, paths are converted to `absolute` and ordered before
calculation"""
return digest_paths(sorted(p.absolute() for p in paths), hash_object)
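
A hedged usage sketch for the richer (removed) digest_paths signature above; the paths and the digest_helper module name are assumptions based on this file.

from digest_helper import digest_paths  # assumed module name for the file above

digest = digest_paths(
    ["./tests/queries/0_stateless/", "tests/ci/ci_config.py"],  # illustrative paths
    exclude_files=[".md"],          # skip documentation files
    exclude_dirs=["__pycache__"],   # skip bytecode caches
)
print(digest.hexdigest()[:10])      # truncated like a job digest (JOB_DIGEST_LEN)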
def digest_script(path_str: str) -> HASH:
"""Accepts value of the __file__ executed script and calculates the md5 hash for it"""
path = Path(path_str)
@ -96,85 +78,3 @@ def digest_script(path_str: str) -> HASH:
logger.warning("The modules size has changed, retry calculating digest")
return digest_script(path_str)
return md5_hash
def digest_string(string: str) -> str:
hash_object = md5()
hash_object.update(string.encode("utf-8"))
return hash_object.hexdigest()
class DockerDigester:
EXCLUDE_FILES = [".md"]
def __init__(self):
self.images_info = get_images_info()
assert self.images_info, "Fetch image info error"
def get_image_digest(self, name: str) -> str:
assert isinstance(name, str)
deps = [name]
digest = None
while deps:
dep_name = deps.pop(0)
digest = digest_path(
self.images_info[dep_name]["path"],
digest,
exclude_files=self.EXCLUDE_FILES,
)
deps += self.images_info[dep_name]["deps"]
assert digest
return digest.hexdigest()[0:DOCKER_DIGEST_LEN]
def get_all_digests(self) -> Dict:
res = {}
for image_name in self.images_info:
res[image_name] = self.get_image_digest(image_name)
return res
class JobDigester:
def __init__(self):
self.dd = DockerDigester()
self.cache: Dict[str, str] = {}
@staticmethod
def _get_config_hash(digest_config: DigestConfig) -> str:
data_dict = asdict(digest_config)
hash_obj = md5()
hash_obj.update(str(data_dict).encode())
hash_string = hash_obj.hexdigest()
return hash_string
def get_job_digest(self, digest_config: DigestConfig) -> str:
if not digest_config.include_paths:
# job is not for digest
return "f" * JOB_DIGEST_LEN
cache_key = self._get_config_hash(digest_config)
if cache_key in self.cache:
return self.cache[cache_key]
digest_str: List[str] = []
if digest_config.include_paths:
digest = digest_paths(
digest_config.include_paths,
hash_object=None,
exclude_files=digest_config.exclude_files,
exclude_dirs=digest_config.exclude_dirs,
)
digest_str += (digest.hexdigest(),)
if digest_config.docker:
for image_name in digest_config.docker:
image_digest = self.dd.get_image_digest(image_name)
digest_str += (image_digest,)
if digest_config.git_submodules:
submodules_sha = Runner().run(
"git submodule | awk '{print $1}' | sed 's/^[+-]//'"
)
assert submodules_sha and len(submodules_sha) > 10
submodules_digest = digest_string("-".join(submodules_sha))
digest_str += (submodules_digest,)
res = digest_string("-".join(digest_str))[0:JOB_DIGEST_LEN]
self.cache[cache_key] = res
return res
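
A hedged sketch of how the removed JobDigester above would be driven; the DigestConfig values mirror the "Fast tests" entry earlier in this diff.

cfg = DigestConfig(
    include_paths=["./tests/queries/0_stateless/"],
    exclude_files=[".md"],
    docker=["clickhouse/fasttest"],
)
# prints a 10-character hex digest, or "ffffffffff" if include_paths were empty
print(JobDigester().get_job_digest(cfg))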

View File

@ -2,43 +2,210 @@
import argparse
import json
import logging
import os
import platform
import subprocess
import time
import sys
from pathlib import Path
from typing import List, Optional, Tuple
from typing import Any, List, Optional, Set, Tuple, Union
from github import Github
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from commit_status_helper import format_description, get_commit, post_commit_status
from env_helper import ROOT_DIR, RUNNER_TEMP, GITHUB_RUN_URL
from get_robot_token import get_best_robot_token
from env_helper import REPO_COPY, RUNNER_TEMP, GITHUB_RUN_URL
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
from pr_info import PRInfo
from report import TestResults, TestResult
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
from upload_result_helper import upload_results
from docker_images_helper import DockerImageData, docker_login, get_images_oredered_list
from docker_images_helper import ImagesDict, IMAGES_FILE_PATH, get_images_dict
NAME = "Push to Dockerhub"
TEMP_PATH = Path(RUNNER_TEMP) / "docker_images_check"
TEMP_PATH.mkdir(parents=True, exist_ok=True)
class DockerImage:
def __init__(
self,
path: str,
repo: str,
only_amd64: bool,
parent: Optional["DockerImage"] = None,
gh_repo: str = REPO_COPY,
):
assert not path.startswith("/")
self.path = path
self.full_path = Path(gh_repo) / path
self.repo = repo
self.only_amd64 = only_amd64
self.parent = parent
self.built = False
def __eq__(self, other) -> bool: # type: ignore
"""Is used to check if DockerImage is in a set or not"""
return (
self.path == other.path
and self.repo == other.repo
and self.only_amd64 == other.only_amd64
)
def __lt__(self, other: Any) -> bool:
if not isinstance(other, DockerImage):
return False
if self.parent and not other.parent:
return False
if not self.parent and other.parent:
return True
if self.path < other.path:
return True
if self.repo < other.repo:
return True
return False
def __hash__(self):
return hash(self.path)
def __str__(self):
return self.repo
def __repr__(self):
return f"DockerImage(path={self.path},repo={self.repo},parent={self.parent})"
def get_changed_docker_images(
pr_info: PRInfo, images_dict: ImagesDict
) -> Set[DockerImage]:
if not images_dict:
return set()
files_changed = pr_info.changed_files
logging.info(
"Changed files for PR %s @ %s: %s",
pr_info.number,
pr_info.sha,
str(files_changed),
)
changed_images = []
for dockerfile_dir, image_description in images_dict.items():
for f in files_changed:
if f.startswith(dockerfile_dir):
name = image_description["name"]
only_amd64 = image_description.get("only_amd64", False)
logging.info(
"Found changed file '%s' which affects "
"docker image '%s' with path '%s'",
f,
name,
dockerfile_dir,
)
changed_images.append(DockerImage(dockerfile_dir, name, only_amd64))
break
# The order is important: dependents should go later than bases, so that
# they are built with updated base versions.
index = 0
while index < len(changed_images):
image = changed_images[index]
for dependent in images_dict[image.path]["dependent"]:
logging.info(
"Marking docker image '%s' as changed because it "
"depends on changed docker image '%s'",
dependent,
image,
)
name = images_dict[dependent]["name"]
only_amd64 = images_dict[dependent].get("only_amd64", False)
changed_images.append(DockerImage(dependent, name, only_amd64, image))
index += 1
if index > 5 * len(images_dict):
# Sanity check to prevent infinite loop.
raise RuntimeError(
f"Too many changed docker images, this is a bug. {changed_images}"
)
# With reversed changed_images set will use images with parents first, and
# images without parents then
result = set(reversed(changed_images))
logging.info(
"Changed docker images for PR %s @ %s: '%s'",
pr_info.number,
pr_info.sha,
result,
)
return result
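
A hedged miniature of the dependency ordering described above; images_dict and the changed file are illustrative, not taken from docker/images.json.

images_dict = {
    "docker/test/base": {"name": "clickhouse/test-base", "dependent": ["docker/test/stateless"]},
    "docker/test/stateless": {"name": "clickhouse/stateless-test", "dependent": []},
}
pr_info.changed_files = {"docker/test/base/Dockerfile"}
changed = get_changed_docker_images(pr_info, images_dict)
# both images are returned: the stateless image is marked changed because it
# depends on the changed base image, and carries it as its parent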
def gen_versions(
pr_info: PRInfo, suffix: Optional[str]
) -> Tuple[List[str], Union[str, List[str]]]:
pr_commit_version = str(pr_info.number) + "-" + pr_info.sha
# The order is important, PR number is used as cache during the build
versions = [str(pr_info.number), pr_commit_version]
result_version = pr_commit_version # type: Union[str, List[str]]
if pr_info.number == 0 and pr_info.base_ref == "master":
# First get the latest for cache
versions.insert(0, "latest")
if suffix:
# We should build architecture specific images separately and merge a
# manifest later in a different script
versions = [f"{v}-{suffix}" for v in versions]
# changed_images_{suffix}.json should contain all changed images
result_version = versions
return versions, result_version
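
To make the tagging scheme concrete, a hedged example of what gen_versions returns for an ordinary PR build with an arch suffix (the PR number and sha are illustrative):

versions, result_version = gen_versions(pr_info, "amd64")  # pr_info.number == 12345, sha == "deadbeef"
# versions       == ["12345-amd64", "12345-deadbeef-amd64"]
# result_version == versions, because a suffix was passed
# for a master build (number == 0, base_ref == "master") "latest-amd64" would be prepended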
def build_and_push_dummy_image(
image: DockerImage,
version_string: str,
push: bool,
) -> Tuple[bool, Path]:
dummy_source = "ubuntu:20.04"
logging.info("Building docker image %s as %s", image.repo, dummy_source)
build_log = (
Path(TEMP_PATH)
/ f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}.log"
)
cmd = (
f"docker pull {dummy_source}; "
f"docker tag {dummy_source} {image.repo}:{version_string}; "
)
if push:
cmd += f"docker push {image.repo}:{version_string}"
logging.info("Docker command to run: %s", cmd)
with TeePopen(cmd, build_log) as proc:
retcode = proc.wait()
if retcode != 0:
return False, build_log
logging.info("Processing of %s successfully finished", image.repo)
return True, build_log
def build_and_push_one_image(
image: DockerImageData,
image: DockerImage,
version_string: str,
additional_cache: List[str],
push: bool,
from_tag: Optional[str] = None,
child: bool,
) -> Tuple[bool, Path]:
if image.only_amd64 and platform.machine() not in ["amd64", "x86_64"]:
return build_and_push_dummy_image(image, version_string, push)
logging.info(
"Building docker image %s with version %s from path %s",
image.repo,
version_string,
image.path,
image.full_path,
)
build_log = (
Path(TEMP_PATH)
@ -49,8 +216,8 @@ def build_and_push_one_image(
push_arg = "--push "
from_tag_arg = ""
if from_tag:
from_tag_arg = f"--build-arg FROM_TAG={from_tag} "
if child:
from_tag_arg = f"--build-arg FROM_TAG={version_string} "
cache_from = (
f"--cache-from type=registry,ref={image.repo}:{version_string} "
@ -70,7 +237,7 @@ def build_and_push_one_image(
f"{cache_from} "
f"--cache-to type=inline,mode=max "
f"{push_arg}"
f"--progress plain {image.path}"
f"--progress plain {image.full_path}"
)
logging.info("Docker command to run: %s", cmd)
with TeePopen(cmd, build_log) as proc:
@ -84,11 +251,11 @@ def build_and_push_one_image(
def process_single_image(
image: DockerImageData,
image: DockerImage,
versions: List[str],
additional_cache: List[str],
push: bool,
from_tag: Optional[str] = None,
child: bool,
) -> TestResults:
logging.info("Image will be pushed with versions %s", ", ".join(versions))
results = [] # type: TestResults
@ -96,7 +263,7 @@ def process_single_image(
stopwatch = Stopwatch()
for i in range(5):
success, build_log = build_and_push_one_image(
image, ver, additional_cache, push, from_tag
image, ver, additional_cache, push, child
)
if success:
results.append(
@ -127,6 +294,27 @@ def process_single_image(
return results
def process_image_with_parents(
image: DockerImage,
versions: List[str],
additional_cache: List[str],
push: bool,
child: bool = False,
) -> TestResults:
results = [] # type: TestResults
if image.built:
return results
if image.parent is not None:
results += process_image_with_parents(
image.parent, versions, additional_cache, push, False
)
child = True
results += process_single_image(image, versions, additional_cache, push, child)
return results
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
@ -136,18 +324,30 @@ def parse_args() -> argparse.Namespace:
"--image-path docker/packager/binary",
)
parser.add_argument("--suffix", type=str, required=True, help="arch suffix")
parser.add_argument(
"--missing-images",
"--suffix",
type=str,
required=True,
help="json string or json file with images to build {IMAGE: TAG} or type all to build all",
help="suffix for all built images tags and resulting json file; the parameter "
"significantly changes the script behavior, e.g. changed_images.json is called "
"changed_images_{suffix}.json and contains list of all tags",
)
parser.add_argument(
"--image-tags",
"--repo",
type=str,
required=True,
help="json string or json file with all images and their tags {IMAGE: TAG}",
default="clickhouse",
help="docker hub repository prefix",
)
parser.add_argument(
"--all",
action="store_true",
help="rebuild all images",
)
parser.add_argument(
"--image-path",
type=str,
nargs="*",
help="list of image paths to build instead of using pr_info + diff URL, "
"e.g. 'docker/packager/binary'",
)
parser.add_argument("--reports", default=True, help=argparse.SUPPRESS)
parser.add_argument(
@ -170,81 +370,82 @@ def parse_args() -> argparse.Namespace:
def main():
# to be always aligned with docker paths from image.json
os.chdir(ROOT_DIR)
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
args = parse_args()
if args.push:
logging.info("login to docker hub")
docker_login()
if args.suffix:
global NAME
NAME += f" {args.suffix}"
changed_json = TEMP_PATH / f"changed_images_{args.suffix}.json"
else:
changed_json = TEMP_PATH / "changed_images.json"
if args.push:
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
shell=True,
)
images_dict = get_images_dict(Path(REPO_COPY), IMAGES_FILE_PATH)
pr_info = PRInfo()
if args.all:
pr_info.changed_files = set(images_dict.keys())
elif args.image_path:
pr_info.changed_files = set(i for i in args.image_path)
else:
try:
pr_info.fetch_changed_files()
except TypeError:
# If the event does not contain diff, nothing will be built
pass
changed_images = get_changed_docker_images(pr_info, images_dict)
if changed_images:
logging.info(
"Has changed images: %s", ", ".join([im.path for im in changed_images])
)
image_versions, result_version = gen_versions(pr_info, args.suffix)
result_images = {}
test_results = [] # type: TestResults
additional_cache = [] # type: List[str]
# FIXME: add all tags that we need. latest on master!
# if pr_info.release_pr:
# logging.info("Use %s as additional cache tag", pr_info.release_pr)
# additional_cache.append(str(pr_info.release_pr))
# if pr_info.merged_pr:
# logging.info("Use %s as additional cache tag", pr_info.merged_pr)
# additional_cache.append(str(pr_info.merged_pr))
if pr_info.release_pr:
logging.info("Use %s as additional cache tag", pr_info.release_pr)
additional_cache.append(str(pr_info.release_pr))
if pr_info.merged_pr:
logging.info("Use %s as additional cache tag", pr_info.merged_pr)
additional_cache.append(str(pr_info.merged_pr))
ok_cnt = 0
status = "success"
image_tags = (
json.loads(args.image_tags)
if not os.path.isfile(args.image_tags)
else json.load(open(args.image_tags))
)
missing_images = (
image_tags
if args.missing_images == "all"
else json.loads(args.missing_images)
if not os.path.isfile(args.missing_images)
else json.load(open(args.missing_images))
)
images_build_list = get_images_oredered_list()
for image in images_build_list:
if image.repo not in missing_images:
continue
logging.info("Start building image: %s", image)
image_versions = (
[image_tags[image.repo]]
if not args.suffix
else [f"{image_tags[image.repo]}-{args.suffix}"]
)
parent_version = (
None
if not image.parent
else image_tags[image.parent]
if not args.suffix
else f"{image_tags[image.parent]}-{args.suffix}"
for image in changed_images:
# If we are in backport PR, then pr_info.release_pr is defined
# We use it as tag to reduce rebuilding time
test_results += process_image_with_parents(
image, image_versions, additional_cache, args.push
)
result_images[image.repo] = result_version
res = process_single_image(
image,
image_versions,
additional_cache,
args.push,
from_tag=parent_version,
)
test_results += res
if all(x.status == "OK" for x in res):
ok_cnt += 1
else:
status = "failure"
break # No need to continue with next images
if changed_images:
description = "Updated " + ",".join([im.repo for im in changed_images])
else:
description = "Nothing to update"
description = format_description(
f"Images build done. built {ok_cnt} out of {len(missing_images)} images."
)
description = format_description(description)
with open(changed_json, "w", encoding="utf-8") as images_file:
logging.info("Saving changed images file %s", changed_json)
json.dump(result_images, images_file)
s3_helper = S3Helper()
pr_info = PRInfo()
status = "success"
if [r for r in test_results if r.status != "OK"]:
status = "failure"
url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
print(f"::notice ::Report url: {url}")
@ -254,9 +455,7 @@ def main():
gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha)
post_commit_status(
commit, status, url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, status, url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -2,136 +2,19 @@
import json
import logging
import os
import subprocess
from pathlib import Path
from typing import Any, Dict, List, Optional
from env_helper import ROOT_DIR, DOCKER_TAG
from get_robot_token import get_parameter_from_ssm
from typing import Dict, List
IMAGES_FILE_PATH = Path("docker/images.json")
ImagesDict = Dict[str, dict]
def docker_login(relogin: bool = True) -> None:
if (
relogin
or subprocess.run( # pylint: disable=unexpected-keyword-arg
"docker system info | grep --quiet -E 'Username|Registry'",
shell=True,
check=False,
).returncode
== 1
):
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
shell=True,
)
class DockerImage:
def __init__(self, name: str, version: Optional[str] = None):
self.name = name
if version is None:
self.version = "latest"
else:
self.version = version
def __str__(self):
return f"{self.name}:{self.version}"
def pull_image(image: DockerImage) -> DockerImage:
try:
logging.info("Pulling image %s - start", image)
subprocess.check_output(
f"docker pull {image}",
stderr=subprocess.STDOUT,
shell=True,
)
logging.info("Pulling image %s - done", image)
except Exception as ex:
logging.info("Got execption pulling docker %s", ex)
raise ex
return image
def get_docker_image(image_name: str) -> DockerImage:
assert DOCKER_TAG and isinstance(DOCKER_TAG, str), "DOCKER_TAG env must be provided"
if "{" in DOCKER_TAG:
tags_map = json.loads(DOCKER_TAG)
assert (
image_name in tags_map
), "Image name does not exist in provided DOCKER_TAG json string"
return DockerImage(image_name, tags_map[image_name])
else:
# DOCKER_TAG is a tag itself
return DockerImage(image_name, DOCKER_TAG)
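
A hedged note on the two DOCKER_TAG shapes the removed get_docker_image accepts; the values below are illustrative.

# DOCKER_TAG='{"clickhouse/fasttest": "123-deadbeef"}' -> per-image tags from a JSON map
# DOCKER_TAG='123-deadbeef'                            -> one tag shared by every image
image = get_docker_image("clickhouse/fasttest")
print(image)  # "clickhouse/fasttest:123-deadbeef"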
class DockerImageData:
def __init__(
self,
path: str,
repo: str,
only_amd64: bool,
parent: Optional["DockerImageData"] = None,
):
assert not path.startswith("/")
self.path = Path(ROOT_DIR) / path
self.repo = repo
self.only_amd64 = only_amd64
self.parent = parent
self.built = False
def __eq__(self, other) -> bool: # type: ignore
"""Is used to check if DockerImageData is in a set or not"""
return (
self.path == other.path
and self.repo == other.repo
and self.only_amd64 == other.only_amd64
)
def __lt__(self, other: Any) -> bool:
if not isinstance(other, DockerImageData):
return False
if self.parent and not other.parent:
return False
if not self.parent and other.parent:
return True
if self.path < other.path:
return True
if self.repo < other.repo:
return True
return False
def __hash__(self):
return hash(self.path)
def __str__(self):
return self.repo
def __repr__(self):
return (
f"DockerImageData(path={self.path},repo={self.repo},parent={self.parent})"
)
def get_images_dict(
repo_path: Optional[Path] = None, images_file_path: Optional[Path] = None
) -> ImagesDict:
def get_images_dict(repo_path: Path, images_file_path: Path) -> ImagesDict:
"""Return images suppose to build on the current architecture host"""
images_dict = {}
images_file_path = images_file_path if images_file_path else IMAGES_FILE_PATH
assert not images_file_path.is_absolute()
cur_dir = os.path.dirname(__file__)
path_to_images_file = (
repo_path if repo_path else Path(f"{cur_dir}/../..") / images_file_path
)
path_to_images_file = repo_path / images_file_path
if path_to_images_file.exists():
with open(path_to_images_file, "rb") as dict_file:
images_dict = json.load(dict_file)
@ -143,56 +26,6 @@ def get_images_dict(
return images_dict
def get_image_names(
repo_path: Optional[Path] = None, images_file_path: Optional[Path] = None
) -> List[str]:
def get_image_names(repo_path: Path, images_file_path: Path) -> List[str]:
images_dict = get_images_dict(repo_path, images_file_path)
return [info["name"] for (_, info) in images_dict.items()]
def get_images_info() -> Dict[str, dict]:
"""
get docker info from images.json in format "image name" : image_info
"""
images_dict = get_images_dict()
images_info: dict = {info["name"]: {"deps": []} for _, info in images_dict.items()}
for path, image_info_reversed in images_dict.items():
name = image_info_reversed["name"]
dependents = image_info_reversed["dependent"]
only_amd64 = "only_amd64" in image_info_reversed
images_info[name]["path"] = path
images_info[name]["only_amd64"] = only_amd64
for dep_path in dependents:
name_dep = images_dict[dep_path]["name"]
images_info[name_dep]["deps"] += [name]
assert len(images_dict) == len(images_info), "BUG!"
return images_info
def get_images_oredered_list() -> List[DockerImageData]:
"""
returns images in a sorted list so that dependents follow their dependees
"""
images_info = get_images_info()
ordered_images: List[DockerImageData] = []
ordered_names: List[str] = []
while len(ordered_names) < len(images_info):
for name, info in images_info.items():
if name in ordered_names:
continue
if all(dep in ordered_names for dep in info["deps"]):
ordered_names += [name]
parents = info["deps"]
assert (
len(parents) < 2
), "FIXME: Multistage docker images are not supported in CI"
ordered_images += [
DockerImageData(
path=info["path"],
repo=name,
only_amd64=info["only_amd64"],
parent=parents[0] if parents else None,
)
]
return ordered_images

View File

@ -6,26 +6,30 @@ import logging
import os
import subprocess
import sys
from typing import List, Tuple
from pathlib import Path
from typing import List, Dict, Tuple
from github import Github
from clickhouse_helper import (
ClickHouseHelper,
prepare_tests_results_for_clickhouse,
CHException,
)
from commit_status_helper import format_description, get_commit, post_commit_status
from get_robot_token import get_best_robot_token
from docker_images_helper import IMAGES_FILE_PATH, get_image_names
from env_helper import RUNNER_TEMP, REPO_COPY
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
from git_helper import Runner
from pr_info import PRInfo
from report import TestResult
from report import TestResults, TestResult
from s3_helper import S3Helper
from stopwatch import Stopwatch
from env_helper import ROOT_DIR
from upload_result_helper import upload_results
from docker_images_helper import docker_login, get_images_oredered_list
NAME = "Push multi-arch images to Dockerhub"
CHANGED_IMAGES = "changed_images_{}.json"
Images = Dict[str, List[str]]
def parse_args() -> argparse.Namespace:
@ -44,21 +48,10 @@ def parse_args() -> argparse.Namespace:
help="suffixes for existing images' tags. More than two should be given",
)
parser.add_argument(
"--missing-images",
type=str,
required=True,
help="json string or json file with images to build {IMAGE: TAG} or type all to build all",
)
parser.add_argument(
"--image-tags",
type=str,
required=True,
help="json string or json file with all images and their tags {IMAGE: TAG}",
)
parser.add_argument(
"--set-latest",
type=str,
help="add latest tag",
"--path",
type=Path,
default=RUNNER_TEMP,
help="path to changed_images_*.json files",
)
parser.add_argument("--reports", default=True, help=argparse.SUPPRESS)
parser.add_argument(
@ -84,13 +77,70 @@ def parse_args() -> argparse.Namespace:
return args
def create_manifest(
image: str, result_tag: str, tags: List[str], push: bool
) -> Tuple[str, str]:
manifest = f"{image}:{result_tag}"
cmd = "docker manifest create --amend " + " ".join(
(f"{image}:{t}" for t in [result_tag] + tags)
)
def load_images(path: Path, suffix: str) -> Images:
with open(path / CHANGED_IMAGES.format(suffix), "rb") as images:
return json.load(images) # type: ignore
def strip_suffix(suffix: str, images: Images) -> Images:
result = {}
for image, versions in images.items():
for v in versions:
if not v.endswith(f"-{suffix}"):
raise ValueError(
f"version {image}:{v} does not contain suffix {suffix}"
)
result[image] = [v[: -len(suffix) - 1] for v in versions]
return result
def check_sources(to_merge: Dict[str, Images]) -> Images:
"""get a dict {arch1: Images, arch2: Images}"""
result = {} # type: Images
first_suffix = ""
for suffix, images in to_merge.items():
if not result:
first_suffix = suffix
result = strip_suffix(suffix, images)
continue
if not result == strip_suffix(suffix, images):
raise ValueError(
f"images in {images} are not equal to {to_merge[first_suffix]}"
)
return result
def get_changed_images(images: Images) -> Dict[str, str]:
"""The original json format is {"image": "tag"}, so the output artifact is
produced here. The latest version is {PR_NUMBER}-{SHA1}
"""
return {k: v[-1] for k, v in images.items()}
def merge_images(to_merge: Dict[str, Images]) -> Dict[str, List[List[str]]]:
"""The function merges image-name:version-suffix1 and image-name:version-suffix2
into image-name:version"""
suffixes = to_merge.keys()
result_images = check_sources(to_merge)
merge = {} # type: Dict[str, List[List[str]]]
for image, versions in result_images.items():
merge[image] = []
for i, v in enumerate(versions):
merged_v = [v] # type: List[str]
for suf in suffixes:
merged_v.append(to_merge[suf][image][i])
merge[image].append(merged_v)
return merge
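
A hedged example of the merge performed above, with illustrative per-arch inputs:

to_merge = {
    "amd64": {"clickhouse/fasttest": ["123-amd64", "123-deadbeef-amd64"]},
    "aarch64": {"clickhouse/fasttest": ["123-aarch64", "123-deadbeef-aarch64"]},
}
merged = merge_images(to_merge)
# merged == {"clickhouse/fasttest": [
#     ["123", "123-amd64", "123-aarch64"],
#     ["123-deadbeef", "123-deadbeef-amd64", "123-deadbeef-aarch64"],
# ]}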
def create_manifest(image: str, tags: List[str], push: bool) -> Tuple[str, str]:
tag = tags[0]
manifest = f"{image}:{tag}"
cmd = "docker manifest create --amend " + " ".join((f"{image}:{t}" for t in tags))
logging.info("running: %s", cmd)
with subprocess.Popen(
cmd,
@ -125,51 +175,114 @@ def create_manifest(
return manifest, "OK"
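
For the restored three-argument create_manifest, a hedged sketch of the docker command it assembles from one merged entry:

manifest, status = create_manifest(
    "clickhouse/fasttest", ["123", "123-amd64", "123-aarch64"], push=False
)
# roughly equivalent to running:
#   docker manifest create --amend clickhouse/fasttest:123 \
#       clickhouse/fasttest:123-amd64 clickhouse/fasttest:123-aarch64
# manifest == "clickhouse/fasttest:123", status == "OK" on success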
def enrich_images(changed_images: Dict[str, str]) -> None:
all_image_names = get_image_names(Path(REPO_COPY), IMAGES_FILE_PATH)
images_to_find_tags_for = [
image for image in all_image_names if image not in changed_images
]
images_to_find_tags_for.sort()
logging.info(
"Trying to find versions for images:\n %s", "\n ".join(images_to_find_tags_for)
)
COMMIT_SHA_BATCH_SIZE = 100
MAX_COMMIT_BATCHES_TO_CHECK = 10
# Gets the sha of the last COMMIT_SHA_BATCH_SIZE commits after skipping some commits (see below)
LAST_N_ANCESTOR_SHA_COMMAND = f"git log --format=format:'%H' --max-count={COMMIT_SHA_BATCH_SIZE} --skip={{}} --merges"
git_runner = Runner()
GET_COMMIT_SHAS_QUERY = """
WITH {commit_shas:Array(String)} AS commit_shas,
{images:Array(String)} AS images
SELECT
splitByChar(':', test_name)[1] AS image_name,
argMax(splitByChar(':', test_name)[2], check_start_time) AS tag
FROM checks
WHERE
check_name == 'Push multi-arch images to Dockerhub'
AND position(test_name, checks.commit_sha)
AND checks.commit_sha IN commit_shas
AND image_name IN images
GROUP BY image_name
"""
batch_count = 0
# We intentionally use the always-publicly-available DB here
ch_helper = ClickHouseHelper(
"https://play.clickhouse.com", {"X-ClickHouse-User": "play"}
)
while (
batch_count <= MAX_COMMIT_BATCHES_TO_CHECK and len(images_to_find_tags_for) != 0
):
commit_shas = git_runner(
LAST_N_ANCESTOR_SHA_COMMAND.format(batch_count * COMMIT_SHA_BATCH_SIZE)
).split("\n")
result = ch_helper.select_json_each_row(
"default",
GET_COMMIT_SHAS_QUERY,
{"commit_shas": commit_shas, "images": images_to_find_tags_for},
)
result.sort(key=lambda x: x["image_name"])
logging.info(
"Found images for commits %s..%s:\n %s",
commit_shas[0],
commit_shas[-1],
"\n ".join(f"{im['image_name']}:{im['tag']}" for im in result),
)
for row in result:
image_name = row["image_name"]
changed_images[image_name] = row["tag"]
images_to_find_tags_for.remove(image_name)
batch_count += 1
def main():
# to be aligned with docker paths from image.json
os.chdir(ROOT_DIR)
logging.basicConfig(level=logging.INFO)
stopwatch = Stopwatch()
args = parse_args()
if args.push:
docker_login()
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
shell=True,
)
archs = args.suffixes
assert len(archs) > 1, "arch suffix input param is invalid"
to_merge = {}
for suf in args.suffixes:
to_merge[suf] = load_images(args.path, suf)
image_tags = (
json.loads(args.image_tags)
if not os.path.isfile(args.image_tags)
else json.load(open(args.image_tags))
)
changed_images = get_changed_images(check_sources(to_merge))
os.environ["DOCKER_CLI_EXPERIMENTAL"] = "enabled"
merged = merge_images(to_merge)
test_results = []
status = "success"
ok_cnt, fail_cnt = 0, 0
images = get_images_oredered_list()
for image_obj in images:
tag = image_tags[image_obj.repo]
if image_obj.only_amd64:
# FIXME: WA until full arm support
tags = [f"{tag}-{arch}" for arch in archs if arch != "aarch64"]
else:
tags = [f"{tag}-{arch}" for arch in archs]
manifest, test_result = create_manifest(image_obj.repo, tag, tags, args.push)
test_results.append(TestResult(manifest, test_result))
if args.set_latest:
manifest, test_result = create_manifest(
image_obj.repo, "latest", tags, args.push
)
test_results = [] # type: TestResults
for image, versions in merged.items():
for tags in versions:
manifest, test_result = create_manifest(image, tags, args.push)
test_results.append(TestResult(manifest, test_result))
if test_result != "OK":
status = "failure"
if test_result != "OK":
status = "failure"
fail_cnt += 1
else:
ok_cnt += 1
enriched_images = changed_images.copy()
try:
# changed_images now contains all the images that are changed in this PR. Let's find the latest tag for the images that are not changed.
enrich_images(enriched_images)
except CHException as ex:
logging.warning("Couldn't get proper tags for not changed images: %s", ex)
with open(args.path / "changed_images.json", "w", encoding="utf-8") as ci:
json.dump(enriched_images, ci)
pr_info = PRInfo()
s3_helper = S3Helper()
@ -181,15 +294,16 @@ def main():
if not args.reports:
return
description = format_description(
f"Multiarch images created [ok: {ok_cnt}, failed: {fail_cnt}]"
)
if changed_images:
description = "Updated " + ", ".join(changed_images.keys())
else:
description = "Nothing to update"
description = format_description(description)
gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha)
post_commit_status(
commit, status, url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, status, url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,
@ -202,8 +316,6 @@ def main():
)
ch_helper = ClickHouseHelper()
ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
if status == "failure":
sys.exit(1)
if __name__ == "__main__":

View File

@ -0,0 +1,90 @@
#!/usr/bin/env python3
import os
import json
import time
import subprocess
import logging
from pathlib import Path
from typing import List, Optional, Union
class DockerImage:
def __init__(self, name: str, version: Optional[str] = None):
self.name = name
if version is None:
self.version = "latest"
else:
self.version = version
def __str__(self):
return f"{self.name}:{self.version}"
def get_images_with_versions(
reports_path: Union[Path, str],
required_images: List[str],
pull: bool = True,
version: Optional[str] = None,
) -> List[DockerImage]:
images_path = None
for root, _, files in os.walk(reports_path):
for f in files:
if f == "changed_images.json":
images_path = os.path.join(root, "changed_images.json")
break
if not images_path:
logging.info("Images file not found")
else:
logging.info("Images file path %s", images_path)
if images_path is not None and os.path.exists(images_path):
logging.info("Images file exists")
with open(images_path, "r", encoding="utf-8") as images_fd:
images = json.load(images_fd)
logging.info("Got images %s", images)
else:
images = {}
docker_images = []
for image_name in required_images:
docker_image = DockerImage(image_name, version)
if image_name in images:
docker_image.version = images[image_name]
docker_images.append(docker_image)
latest_error = Exception("predefined to avoid access before created")
if pull:
for docker_image in docker_images:
for i in range(10):
try:
logging.info("Pulling image %s", docker_image)
subprocess.check_output(
f"docker pull {docker_image}",
stderr=subprocess.STDOUT,
shell=True,
)
break
except Exception as ex:
latest_error = ex
time.sleep(i * 3)
logging.info("Got execption pulling docker %s", ex)
else:
raise Exception(
"Cannot pull dockerhub for image docker pull "
f"{docker_image} because of {latest_error}"
)
return docker_images
def get_image_with_version(
reports_path: Union[Path, str],
image: str,
pull: bool = True,
version: Optional[str] = None,
) -> DockerImage:
logging.info("Looking for images file in %s", reports_path)
return get_images_with_versions(reports_path, [image], pull, version=version)[0]
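
A hedged usage sketch for the restored docker_pull_helper above; REPORTS_PATH is assumed to contain a changed_images.json produced by the docker jobs.

image = get_image_with_version(REPORTS_PATH, "clickhouse/fasttest")
print(image)  # e.g. "clickhouse/fasttest:123-deadbeef", or ":latest" if the image is not in the file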

View File

@ -4,33 +4,27 @@
import argparse
import json
import logging
import subprocess
import sys
import time
from pathlib import Path
from os import path as p, makedirs
from typing import Dict, List
from typing import List
from github import Github
from build_check import get_release_or_pr
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from commit_status_helper import format_description, get_commit, post_commit_status
from docker_images_helper import DockerImageData, docker_login
from env_helper import (
GITHUB_RUN_URL,
REPORT_PATH,
TEMP_PATH,
S3_BUILDS_BUCKET,
S3_DOWNLOAD,
)
from get_robot_token import get_best_robot_token
from docker_images_check import DockerImage
from env_helper import CI, GITHUB_RUN_URL, RUNNER_TEMP, S3_BUILDS_BUCKET, S3_DOWNLOAD
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
from git_helper import Git
from pr_info import PRInfo
from report import TestResults, TestResult
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
from build_download_helper import read_build_urls
from upload_result_helper import upload_results
from version_helper import (
ClickHouseVersion,
@ -39,10 +33,10 @@ from version_helper import (
version_arg,
)
TEMP_PATH = p.join(RUNNER_TEMP, "docker_images_check")
BUCKETS = {"amd64": "package_release", "arm64": "package_aarch64"}
git = Git(ignore_no_tags=True)
ARCH = ("amd64", "arm64")
class DelOS(argparse.Action):
def __call__(self, _, namespace, __, option_string=None):
@ -121,11 +115,6 @@ def parse_args() -> argparse.Namespace:
default=argparse.SUPPRESS,
help="don't build alpine image",
)
parser.add_argument(
"--allow-build-reuse",
action="store_true",
help="allows binaries built on different branch if source digest matches current repo state",
)
return parser.parse_args()
@ -225,29 +214,26 @@ def gen_tags(version: ClickHouseVersion, release_type: str) -> List[str]:
return tags
def buildx_args(urls: Dict[str, str], arch: str, direct_urls: List[str]) -> List[str]:
def buildx_args(bucket_prefix: str, arch: str) -> List[str]:
args = [
f"--platform=linux/{arch}",
f"--label=build-url={GITHUB_RUN_URL}",
f"--label=com.clickhouse.build.githash={git.sha}",
]
if direct_urls:
args.append(f"--build-arg=DIRECT_DOWNLOAD_URLS='{' '.join(direct_urls)}'")
elif urls:
url = urls[arch]
if bucket_prefix:
url = p.join(bucket_prefix, BUCKETS[arch]) # to prevent a double //
args.append(f"--build-arg=REPOSITORY='{url}'")
args.append(f"--build-arg=deb_location_url='{url}'")
return args
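
A hedged example of the buildx arguments the restored two-argument buildx_args produces for the amd64 build; the bucket prefix is illustrative.

buildx_args("https://example.invalid/builds/0/deadbeef", "amd64")
# == ["--platform=linux/amd64",
#     "--label=build-url=...",                    # GITHUB_RUN_URL
#     "--label=com.clickhouse.build.githash=...", # current git sha
#     "--build-arg=REPOSITORY='https://example.invalid/builds/0/deadbeef/package_release'",
#     "--build-arg=deb_location_url='https://example.invalid/builds/0/deadbeef/package_release'"]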
def build_and_push_image(
image: DockerImageData,
image: DockerImage,
push: bool,
repo_urls: dict[str, str],
bucket_prefix: str,
os: str,
tag: str,
version: ClickHouseVersion,
direct_urls: Dict[str, List[str]],
) -> TestResults:
result = [] # type: TestResults
if os != "ubuntu":
@ -264,19 +250,13 @@ def build_and_push_image(
# images must be built separately and merged together with `docker manifest`
digests = []
multiplatform_sw = Stopwatch()
for arch in ARCH:
for arch in BUCKETS:
single_sw = Stopwatch()
arch_tag = f"{tag}-{arch}"
metadata_path = p.join(TEMP_PATH, arch_tag)
dockerfile = p.join(image.path, f"Dockerfile.{os}")
dockerfile = p.join(image.full_path, f"Dockerfile.{os}")
cmd_args = list(init_args)
urls = []
if direct_urls:
if os == "ubuntu" and "clickhouse-server" in image.repo:
urls = [url for url in direct_urls[arch] if ".deb" in url]
else:
urls = [url for url in direct_urls[arch] if ".tgz" in url]
cmd_args.extend(buildx_args(repo_urls, arch, direct_urls=urls))
cmd_args.extend(buildx_args(bucket_prefix, arch))
if not push:
cmd_args.append(f"--tag={image.repo}:{arch_tag}")
cmd_args.extend(
@ -285,7 +265,7 @@ def build_and_push_image(
f"--build-arg=VERSION='{version.string}'",
"--progress=plain",
f"--file={dockerfile}",
image.path.as_posix(),
image.full_path.as_posix(),
]
)
cmd = " ".join(cmd_args)
@ -343,47 +323,25 @@ def main():
makedirs(TEMP_PATH, exist_ok=True)
args = parse_args()
image = DockerImageData(args.image_path, args.image_repo, False)
image = DockerImage(args.image_path, args.image_repo, False)
args.release_type = auto_release_type(args.version, args.release_type)
tags = gen_tags(args.version, args.release_type)
NAME = f"Docker image {image.repo} building check"
pr_info = None
repo_urls = dict()
direct_urls: Dict[str, List[str]] = dict()
pr_info = PRInfo()
release_or_pr, _ = get_release_or_pr(pr_info, args.version)
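    # derive the per-architecture repository URL, either from the current build
    # artifacts in S3 or from an explicitly provided bucket prefix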
for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")):
if not args.bucket_prefix:
repo_urls[
arch
] = f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/{release_or_pr}/{pr_info.sha}/{build_name}"
else:
repo_urls[arch] = f"{args.bucket_prefix}/{build_name}"
if args.allow_build_reuse:
# read s3 urls from pre-downloaded build reports
if "clickhouse-server" in args.image_repo:
PACKAGES = [
"clickhouse-client",
"clickhouse-server",
"clickhouse-common-static",
]
elif "clickhouse-keeper" in args.image_repo:
PACKAGES = ["clickhouse-keeper"]
else:
assert False, "BUG"
urls = read_build_urls(build_name, Path(REPORT_PATH))
assert (
urls
), f"URLS has not been read from build report, report path[{REPORT_PATH}], build [{build_name}]"
direct_urls[arch] = [
url
for url in urls
if any(package in url for package in PACKAGES) and "-dbg" not in url
]
if CI:
pr_info = PRInfo()
release_or_pr, _ = get_release_or_pr(pr_info, args.version)
args.bucket_prefix = (
f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/{release_or_pr}/{pr_info.sha}"
)
if args.push:
docker_login()
subprocess.check_output( # pylint: disable=unexpected-keyword-arg
"docker login --username 'robotclickhouse' --password-stdin",
input=get_parameter_from_ssm("dockerhub_robot_password"),
encoding="utf-8",
shell=True,
)
NAME = f"Docker image {image.repo} build and push"
logging.info("Following tags will be created: %s", ", ".join(tags))
@ -393,7 +351,7 @@ def main():
for tag in tags:
test_results.extend(
build_and_push_image(
image, args.push, repo_urls, os, tag, args.version, direct_urls
image, args.push, args.bucket_prefix, os, tag, args.version
)
)
if test_results[-1].status != "OK":
@ -415,9 +373,7 @@ def main():
gh = Github(get_best_robot_token(), per_page=100)
commit = get_commit(gh, pr_info.sha)
post_commit_status(
commit, status, url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, status, url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -10,13 +10,14 @@ from github import Github
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from commit_status_helper import (
NotSet,
RerunHelper,
get_commit,
post_commit_status,
update_mergeable_check,
)
from docker_images_helper import get_docker_image, pull_image
from env_helper import TEMP_PATH, REPO_COPY
from docker_pull_helper import get_image_with_version
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults, TestResult
@ -56,6 +57,8 @@ def main():
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
reports_path.mkdir(parents=True, exist_ok=True)
repo_path = Path(REPO_COPY)
pr_info = PRInfo(need_changed_files=True)
@ -72,13 +75,7 @@ def main():
if not pr_info.has_changes_in_documentation() and not args.force:
logging.info("No changes in documentation")
post_commit_status(
commit,
"success",
"",
"No changes in docs",
NAME,
pr_info,
dump_to_file=True,
commit, "success", NotSet, "No changes in docs", NAME, pr_info
)
sys.exit(0)
@ -87,7 +84,7 @@ def main():
elif args.force:
logging.info("Check the docs because of force flag")
docker_image = pull_image(get_docker_image("clickhouse/docs-builder"))
docker_image = get_image_with_version(reports_path, "clickhouse/docs-builder")
test_output = temp_path / "docs_check_log"
test_output.mkdir(parents=True, exist_ok=True)
@ -141,9 +138,7 @@ def main():
s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME
)
print("::notice ::Report url: {report_url}")
post_commit_status(
commit, status, report_url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -9,12 +9,10 @@ from build_download_helper import get_gh_api
module_dir = p.abspath(p.dirname(__file__))
git_root = p.abspath(p.join(module_dir, "..", ".."))
ROOT_DIR = git_root
CI = bool(os.getenv("CI"))
TEMP_PATH = os.getenv("TEMP_PATH", p.abspath(p.join(module_dir, "./tmp")))
REPORT_PATH = f"{TEMP_PATH}/reports"
# FIXME: latest should not be used in CI, set temporarily for the transition to "docker with digest as a tag"
DOCKER_TAG = os.getenv("DOCKER_TAG", "latest")
CACHES_PATH = os.getenv("CACHES_PATH", TEMP_PATH)
CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "")
@ -25,6 +23,7 @@ GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root)
GITHUB_RUN_URL = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}"
IMAGES_PATH = os.getenv("IMAGES_PATH", TEMP_PATH)
REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports")))
REPO_COPY = os.getenv("REPO_COPY", GITHUB_WORKSPACE)
RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")

View File

@ -7,7 +7,8 @@ import csv
import sys
import atexit
from pathlib import Path
from typing import Tuple
from typing import List, Tuple
from github import Github
from build_check import get_release_or_pr
@ -22,9 +23,8 @@ from commit_status_helper import (
update_mergeable_check,
format_description,
)
from docker_images_helper import DockerImage, get_docker_image, pull_image
from env_helper import S3_BUILDS_BUCKET, TEMP_PATH, REPO_COPY
from docker_pull_helper import get_image_with_version, DockerImage
from env_helper import S3_BUILDS_BUCKET, TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import FORCE_TESTS_LABEL, PRInfo
from report import TestResult, TestResults, read_test_results
@ -118,6 +118,8 @@ def main():
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
reports_path.mkdir(parents=True, exist_ok=True)
pr_info = PRInfo()
@ -134,7 +136,7 @@ def main():
sys.exit(1)
sys.exit(0)
docker_image = pull_image(get_docker_image("clickhouse/fasttest"))
docker_image = get_image_with_version(reports_path, "clickhouse/fasttest")
s3_helper = S3Helper()
@ -231,9 +233,7 @@ def main():
build_urls,
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, state, report_url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, state, report_url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -4,6 +4,7 @@ from github import Github
from commit_status_helper import (
CI_STATUS_NAME,
NotSet,
get_commit,
get_commit_filtered_statuses,
post_commit_status,
@ -35,11 +36,10 @@ def main():
post_commit_status(
commit,
"success",
status.target_url,
status.target_url or NotSet,
"All checks finished",
CI_STATUS_NAME,
pr_info,
dump_to_file=True,
)

View File

@ -20,6 +20,7 @@ from clickhouse_helper import (
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
NotSet,
RerunHelper,
get_commit,
override_status,
@ -27,9 +28,9 @@ from commit_status_helper import (
post_commit_status_to_file,
update_mergeable_check,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from docker_pull_helper import DockerImage, get_image_with_version
from download_release_packages import download_last_release
from env_helper import REPORT_PATH, TEMP_PATH, REPO_COPY
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import FORCE_TESTS_LABEL, PRInfo
from report import TestResults, read_test_results
@ -224,24 +225,16 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path.mkdir(parents=True, exist_ok=True)
repo_path = Path(REPO_COPY)
reports_path = Path(REPORTS_PATH)
post_commit_path = temp_path / "functional_commit_status.tsv"
args = parse_args()
check_name = args.check_name or os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
kill_timeout = args.kill_timeout or int(os.getenv("KILL_TIMEOUT", "0"))
assert (
kill_timeout > 0
), "kill timeout must be provided as an input arg or in KILL_TIMEOUT env"
check_name = args.check_name
kill_timeout = args.kill_timeout
validate_bugfix_check = args.validate_bugfix
print(f"Runnin check [{check_name}] with timeout [{kill_timeout}]")
flaky_check = "flaky" in check_name.lower()
@ -292,11 +285,10 @@ def main():
post_commit_status(
commit,
state,
"",
NotSet,
NO_CHANGES_MSG,
check_name_with_group,
pr_info,
dump_to_file=True,
)
elif args.post_commit_status == "file":
post_commit_status_to_file(
@ -308,8 +300,7 @@ def main():
sys.exit(0)
image_name = get_image_name(check_name)
docker_image = pull_image(get_docker_image(image_name))
docker_image = get_image_with_version(reports_path, image_name)
packages_path = temp_path / "packages"
packages_path.mkdir(parents=True, exist_ok=True)
@ -388,13 +379,7 @@ def main():
print(f"::notice:: {check_name} Report url: {report_url}")
if args.post_commit_status == "commit_status":
post_commit_status(
commit,
state,
report_url,
description,
check_name_with_group,
pr_info,
dump_to_file=True,
commit, state, report_url, description, check_name_with_group, pr_info
)
elif args.post_commit_status == "file":
post_commit_status_to_file(

View File

@ -19,13 +19,6 @@ SHA_REGEXP = re.compile(r"\A([0-9]|[a-f]){40}\Z")
CWD = p.dirname(p.realpath(__file__))
TWEAK = 1
GIT_PREFIX = ( # All commits to remote are done as robot-clickhouse
"git -c user.email=robot-clickhouse@users.noreply.github.com "
"-c user.name=robot-clickhouse -c commit.gpgsign=false "
"-c core.sshCommand="
"'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'"
)
# Py 3.8 removeprefix and removesuffix
def removeprefix(string: str, prefix: str) -> str:

View File

@ -25,8 +25,8 @@ from commit_status_helper import (
update_mergeable_check,
)
from compress_files import compress_fast
from docker_images_helper import DockerImage, pull_image, get_docker_image
from env_helper import CI, REPORT_PATH, TEMP_PATH as TEMP
from docker_pull_helper import get_image_with_version, DockerImage
from env_helper import CI, TEMP_PATH as TEMP, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults, TestResult, FAILURE, FAIL, OK, SUCCESS
@ -151,7 +151,7 @@ def test_install_tgz(image: DockerImage) -> TestResults:
# FIXME: I couldn't find why Type=notify is broken in centos:8
# systemd just ignores the watchdog completely
tests = {
f"Install server tgz in {image}": r"""#!/bin/bash -ex
f"Install server tgz in {image.name}": r"""#!/bin/bash -ex
[ -f /etc/debian_version ] && CONFIGURE=configure || CONFIGURE=
for pkg in /packages/clickhouse-{common,client,server}*tgz; do
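    # strip everything from the last dash (the version suffix) to get the package base path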
package=${pkg%-*}
@ -161,7 +161,7 @@ for pkg in /packages/clickhouse-{common,client,server}*tgz; do
done
[ -f /etc/yum.conf ] && echo CLICKHOUSE_WATCHDOG_ENABLE=0 > /etc/default/clickhouse-server
bash -ex /packages/server_test.sh""",
f"Install keeper tgz in {image}": r"""#!/bin/bash -ex
f"Install keeper tgz in {image.name}": r"""#!/bin/bash -ex
[ -f /etc/debian_version ] && CONFIGURE=configure || CONFIGURE=
for pkg in /packages/clickhouse-keeper*tgz; do
package=${pkg%-*}
@ -224,6 +224,7 @@ def parse_args() -> argparse.Namespace:
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="The script to check if the packages are able to install",
)
parser.add_argument(
"check_name",
help="check name, used to download the packages",
@ -288,9 +289,10 @@ def main():
)
sys.exit(0)
deb_image = pull_image(get_docker_image(DEB_IMAGE))
rpm_image = pull_image(get_docker_image(RPM_IMAGE))
docker_images = {
name: get_image_with_version(REPORTS_PATH, name, args.download)
for name in (RPM_IMAGE, DEB_IMAGE)
}
prepare_test_scripts()
if args.download:
@ -310,7 +312,7 @@ def main():
return is_match
download_builds_filter(
args.check_name, REPORT_PATH, TEMP_PATH, filter_artifacts
args.check_name, REPORTS_PATH, TEMP_PATH, filter_artifacts
)
test_results = [] # type: TestResults
@ -323,12 +325,12 @@ def main():
subprocess.check_output(f"{ch_copy.absolute()} local -q 'SELECT 1'", shell=True)
if args.deb:
test_results.extend(test_install_deb(deb_image))
test_results.extend(test_install_deb(docker_images[DEB_IMAGE]))
if args.rpm:
test_results.extend(test_install_rpm(rpm_image))
test_results.extend(test_install_rpm(docker_images[RPM_IMAGE]))
if args.tgz:
test_results.extend(test_install_tgz(deb_image))
test_results.extend(test_install_tgz(rpm_image))
test_results.extend(test_install_tgz(docker_images[DEB_IMAGE]))
test_results.extend(test_install_tgz(docker_images[RPM_IMAGE]))
state = SUCCESS
test_status = OK
@ -358,15 +360,7 @@ def main():
description = format_description(description)
post_commit_status(
commit,
state,
report_url,
description,
args.check_name,
pr_info,
dump_to_file=True,
)
post_commit_status(commit, state, report_url, description, args.check_name, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -24,9 +24,9 @@ from commit_status_helper import (
post_commit_status,
post_commit_status_to_file,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from docker_pull_helper import get_images_with_versions, DockerImage
from download_release_packages import download_last_release
from env_helper import REPORT_PATH, TEMP_PATH, REPO_COPY
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import ERROR, TestResult, TestResults, read_test_results
@ -166,17 +166,14 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
post_commit_path = temp_path / "integration_commit_status.tsv"
repo_path = Path(REPO_COPY)
reports_path = Path(REPORTS_PATH)
args = parse_args()
check_name = args.check_name or os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided in --check-name input option or in CHECK_NAME env"
check_name = args.check_name
validate_bugfix_check = args.validate_bugfix
if "RUN_BY_HASH_NUM" in os.environ:
@ -218,7 +215,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
images = [pull_image(get_docker_image(i)) for i in IMAGES]
images = get_images_with_versions(reports_path, IMAGES)
result_path = temp_path / "output_dir"
result_path.mkdir(parents=True, exist_ok=True)
@ -313,13 +310,7 @@ def main():
print(f"::notice:: {check_name} Report url: {report_url}")
if args.post_commit_status == "commit_status":
post_commit_status(
commit,
state,
report_url,
description,
check_name_with_group,
pr_info,
dump_to_file=True,
commit, state, report_url, description, check_name_with_group, pr_info
)
elif args.post_commit_status == "file":
post_commit_status_to_file(post_commit_path, description, state, report_url)

View File

@ -292,9 +292,7 @@ def main():
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, status, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, check_name, pr_info)
ch_helper = ClickHouseHelper()
prepared_events = prepare_tests_results_for_clickhouse(

View File

@ -20,9 +20,9 @@ from commit_status_helper import (
get_commit,
update_mergeable_check,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from docker_pull_helper import DockerImage, get_image_with_version
from env_helper import REPORT_PATH, TEMP_PATH, REPO_COPY
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults
@ -107,9 +107,8 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
repo_path = Path(REPO_COPY)
reports_path = REPORTS_PATH
args = parse_args()
check_name = args.check_name
@ -138,7 +137,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image("clickhouse/libfuzzer"))
docker_image = get_image_with_version(reports_path, "clickhouse/libfuzzer")
fuzzers_path = temp_path / "fuzzers"
fuzzers_path.mkdir(parents=True, exist_ok=True)

View File

@ -4,7 +4,7 @@ import argparse
import logging
import os
from commit_status_helper import get_commit, post_commit_status
from commit_status_helper import NotSet, get_commit, post_commit_status
from env_helper import GITHUB_JOB_URL
from get_robot_token import get_best_robot_token
from github_helper import GitHub
@ -49,13 +49,7 @@ def main():
commit = get_commit(gh, args.commit)
gh.get_rate_limit()
post_commit_status(
commit,
"success",
url,
description,
RELEASE_READY_STATUS,
pr_info,
dump_to_file=True,
commit, "success", url or NotSet, description, RELEASE_READY_STATUS, pr_info
)

View File

@ -14,15 +14,15 @@ from github import Github
from commit_status_helper import RerunHelper, get_commit, post_commit_status
from ci_config import CI_CONFIG
from docker_images_helper import pull_image, get_docker_image
from docker_pull_helper import get_image_with_version
from env_helper import (
GITHUB_EVENT_PATH,
GITHUB_RUN_URL,
REPO_COPY,
REPORTS_PATH,
S3_BUILDS_BUCKET,
S3_DOWNLOAD,
TEMP_PATH,
REPORT_PATH,
)
from get_robot_token import get_best_robot_token, get_parameter_from_ssm
from pr_info import PRInfo
@ -30,7 +30,6 @@ from s3_helper import S3Helper
from tee_popen import TeePopen
from clickhouse_helper import get_instance_type, get_instance_id
from stopwatch import Stopwatch
from build_download_helper import download_builds_filter
IMAGE_NAME = "clickhouse/performance-comparison"
@ -64,7 +63,6 @@ def get_run_command(
f"docker run --privileged --volume={workspace}:/workspace "
f"--volume={result_path}:/output "
f"--volume={repo_tests_path}:/usr/share/clickhouse-test "
f"--volume={TEMP_PATH}:/artifacts "
f"--cap-add syslog --cap-add sys_admin --cap-add sys_rawio "
f"{env_str} {additional_env} "
f"{image}"
@ -79,11 +77,9 @@ def main():
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
repo_tests_path = Path(REPO_COPY, "tests")
reports_path = Path(REPORTS_PATH)
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
check_name = sys.argv[1]
required_build = CI_CONFIG.test_configs[check_name].required_build
with open(GITHUB_EVENT_PATH, "r", encoding="utf-8") as event_file:
@ -127,13 +123,7 @@ def main():
message = "Skipped, not labeled with 'pr-performance'"
report_url = GITHUB_RUN_URL
post_commit_status(
commit,
status,
report_url,
message,
check_name_with_group,
pr_info,
dump_to_file=True,
commit, status, report_url, message, check_name_with_group, pr_info
)
sys.exit(0)
@ -151,7 +141,7 @@ def main():
.replace("/", "_")
)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
result_path = temp_path / "result"
result_path.mkdir(parents=True, exist_ok=True)
@ -168,11 +158,6 @@ def main():
"CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME_PREFIX": check_name_prefix,
}
download_builds_filter(
check_name, REPORT_PATH, TEMP_PATH, lambda url: "performance.tar.zst" in url
)
assert os.path.exists(f"{TEMP_PATH}/performance.tar.zst"), "Perf artifact not found"
docker_env += "".join([f" -e {name}" for name in env_extra])
run_command = get_run_command(
@ -279,13 +264,7 @@ def main():
)
post_commit_status(
commit,
status,
report_url,
message,
check_name_with_group,
pr_info,
dump_to_file=True,
commit, status, report_url, message, check_name_with_group, pr_info
)
if status == "error":

View File

@ -415,13 +415,10 @@ class BuildResult:
def _set_properties(self) -> None:
if all(p is not None for p in (self._job_name, self._job_html_url)):
return
job_data = {}
        # quick check that self.job_api_url is a valid URL before the request; it is set to "missing" for a dummy BuildResult
if "http" in self.job_api_url:
try:
job_data = get_gh_api(self.job_api_url).json()
except Exception:
pass
try:
job_data = get_gh_api(self.job_api_url).json()
except Exception:
job_data = {}
# job_name can be set manually
self._job_name = self._job_name or job_data.get("name", "unknown")
self._job_html_url = job_data.get("html_url", "")

View File

@ -7,6 +7,7 @@ from github import Github
from commit_status_helper import (
CI_STATUS_NAME,
NotSet,
create_ci_report,
format_description,
get_commit,
@ -136,7 +137,6 @@ def main():
if pr_labels_to_remove:
remove_labels(gh, pr_info, pr_labels_to_remove)
# FIXME: it should rather be in finish check. no reason to stop ci run.
if FEATURE_LABEL in pr_info.labels and not pr_info.has_changes_in_documentation():
print(
f"The '{FEATURE_LABEL}' in the labels, "
@ -145,7 +145,7 @@ def main():
post_commit_status( # do not pass pr_info here intentionally
commit,
"failure",
"",
NotSet,
f"expect adding docs for {FEATURE_LABEL}",
DOCS_NAME,
pr_info,
@ -181,23 +181,13 @@ def main():
if not can_run:
print("::notice ::Cannot run")
post_commit_status(
commit,
labels_state,
ci_report_url,
description,
CI_STATUS_NAME,
pr_info,
commit, labels_state, ci_report_url, description, CI_STATUS_NAME, pr_info
)
sys.exit(1)
else:
print("::notice ::Can run")
post_commit_status(
commit,
"pending",
ci_report_url,
description,
CI_STATUS_NAME,
pr_info,
commit, "pending", ci_report_url, description, CI_STATUS_NAME, pr_info
)

View File

@ -117,40 +117,6 @@ class S3Helper:
return S3Helper.copy_file_to_local(S3_BUILDS_BUCKET, file_path, s3_path)
def upload_file(
self, bucket: str, file_path: Union[Path, str], s3_path: Union[Path, str]
) -> str:
return self._upload_file_to_s3(bucket, Path(file_path), str(s3_path))
def download_file(
self, bucket: str, s3_path: str, local_file_path: Union[Path, str]
) -> None:
if Path(local_file_path).is_dir():
local_file_path = Path(local_file_path) / s3_path.split("/")[-1]
try:
self.client.download_file(bucket, s3_path, local_file_path)
except botocore.exceptions.ClientError as e:
if e.response and e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
assert False, f"No such object [s3://{S3_BUILDS_BUCKET}/{s3_path}]"
def download_files(
self,
bucket: str,
s3_path: str,
file_suffix: str,
local_directory: Union[Path, str],
) -> List[str]:
local_directory = Path(local_directory)
local_directory.mkdir(parents=True, exist_ok=True)
objects = self.list_prefix_non_recursive(s3_path)
res = []
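        # download every object whose key ends with the requested suffix;
        # download_file saves each one under its own file name inside local_directory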
for obj in objects:
if obj.endswith(file_suffix):
local_file_path = local_directory
self.download_file(bucket, obj, local_file_path)
res.append(obj.split("/")[-1])
return res
def fast_parallel_upload_dir(
self, dir_path: Path, s3_dir_path: str, bucket_name: str
) -> List[str]:
@ -312,18 +278,6 @@ class S3Helper:
return result
def list_prefix_non_recursive(
self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET
) -> List[str]:
objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path)
result = []
if "Contents" in objects:
for obj in objects["Contents"]:
if "/" not in obj["Key"][len(s3_prefix_path) + 1 :]:
result.append(obj["Key"])
return result
def url_if_exists(self, key: str, bucket: str = S3_BUILDS_BUCKET) -> str:
if not CI:
local_path = self.local_path(bucket, key)

View File

@ -1,7 +1,6 @@
#!/usr/bin/env python3
import logging
import os
import subprocess
import sys
from pathlib import Path
@ -16,10 +15,10 @@ from commit_status_helper import (
get_commit,
post_commit_status,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from docker_pull_helper import get_image_with_version, DockerImage
from env_helper import (
GITHUB_RUN_URL,
REPORT_PATH,
REPORTS_PATH,
TEMP_PATH,
)
from get_robot_token import get_best_robot_token
@ -51,12 +50,10 @@ def main():
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORT_PATH)
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
reports_path = Path(REPORTS_PATH)
check_name = sys.argv[1]
pr_info = PRInfo()
@ -68,7 +65,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
build_name = get_build_name_for_check(check_name)
urls = read_build_urls(build_name, reports_path)
@ -150,9 +147,7 @@ def main():
check_name,
)
post_commit_status(
commit, status, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, check_name, pr_info)
print(f"::notice:: {check_name} Report url: {report_url}")
ch_helper = ClickHouseHelper()

View File

@ -18,8 +18,8 @@ from commit_status_helper import (
override_status,
post_commit_status,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from env_helper import REPORT_PATH, TEMP_PATH, REPO_COPY
from docker_pull_helper import get_image_with_version, DockerImage
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import OK, FAIL, ERROR, SUCCESS, TestResults, TestResult, read_test_results
@ -70,16 +70,8 @@ def read_check_status(result_folder: Path) -> Tuple[str, str]:
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"--check-name",
required=False,
default="",
)
parser.add_argument(
"--kill-timeout",
required=False,
default=0,
)
parser.add_argument("check_name")
parser.add_argument("kill_timeout", type=int)
return parser.parse_args()
@ -89,20 +81,12 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
repo_path = Path(REPO_COPY)
reports_path = Path(REPORTS_PATH)
args = parse_args()
check_name = args.check_name
check_name = args.check_name or os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
kill_timeout = args.kill_timeout or int(os.getenv("KILL_TIMEOUT", "0"))
assert (
kill_timeout > 0
), "kill timeout must be provided as an input arg or in KILL_TIMEOUT env"
pr_info = PRInfo()
gh = Github(get_best_robot_token(), per_page=100)
@ -113,7 +97,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
repo_tests_path = repo_path / "tests"
@ -139,7 +123,7 @@ def main():
)
logging.info("Going to run func tests: %s", run_command)
with TeePopen(run_command, run_log_path, timeout=kill_timeout) as process:
with TeePopen(run_command, run_log_path, timeout=args.kill_timeout) as process:
retcode = process.wait()
if retcode == 0:
logging.info("Run successfully")
@ -206,9 +190,7 @@ def main():
assert description is not None
# FIXME: force SUCCESS until all cases are fixed
status = SUCCESS
post_commit_status(
commit, status, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, check_name, pr_info)
if __name__ == "__main__":

View File

@ -16,10 +16,10 @@ from commit_status_helper import (
get_commit,
post_commit_status,
)
from docker_images_helper import pull_image, get_docker_image
from docker_pull_helper import get_image_with_version
from env_helper import (
GITHUB_RUN_URL,
REPORT_PATH,
REPORTS_PATH,
TEMP_PATH,
)
from get_robot_token import get_best_robot_token
@ -50,13 +50,9 @@ def main():
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
check_name = sys.argv[1]
temp_path.mkdir(parents=True, exist_ok=True)
@ -70,7 +66,7 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
build_name = get_build_name_for_check(check_name)
print(build_name)
@ -154,9 +150,7 @@ def main():
logging.info("Result: '%s', '%s', '%s'", status, description, report_url)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, status, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, status, report_url, description, check_name, pr_info)
if __name__ == "__main__":

View File

@ -2,7 +2,6 @@
import csv
import logging
import os
import subprocess
import sys
from pathlib import Path
@ -22,8 +21,8 @@ from commit_status_helper import (
post_commit_status,
format_description,
)
from docker_images_helper import DockerImage, pull_image, get_docker_image
from env_helper import REPORT_PATH, TEMP_PATH, REPO_COPY
from docker_pull_helper import DockerImage, get_image_with_version
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResult, TestResults, read_test_results
@ -127,15 +126,12 @@ def run_stress_test(docker_image_name: str) -> None:
stopwatch = Stopwatch()
temp_path = Path(TEMP_PATH)
reports_path = Path(REPORT_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
repo_path = Path(REPO_COPY)
repo_tests_path = repo_path / "tests"
reports_path = Path(REPORTS_PATH)
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
check_name = sys.argv[1]
pr_info = PRInfo()
@ -147,7 +143,7 @@ def run_stress_test(docker_image_name: str) -> None:
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(docker_image_name))
docker_image = get_image_with_version(reports_path, docker_image_name)
packages_path = temp_path / "packages"
packages_path.mkdir(parents=True, exist_ok=True)
@ -216,9 +212,7 @@ def run_stress_test(docker_image_name: str) -> None:
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, state, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, state, report_url, description, check_name, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -9,6 +9,7 @@ import sys
from pathlib import Path
from typing import List, Tuple
from clickhouse_helper import (
ClickHouseHelper,
prepare_tests_results_for_clickhouse,
@ -19,21 +20,27 @@ from commit_status_helper import (
post_commit_status,
update_mergeable_check,
)
from env_helper import REPO_COPY, TEMP_PATH
from docker_pull_helper import get_image_with_version
from env_helper import REPO_COPY, REPORTS_PATH, TEMP_PATH
from get_robot_token import get_best_robot_token
from github_helper import GitHub
from git_helper import GIT_PREFIX, git_runner
from git_helper import git_runner
from pr_info import PRInfo
from report import TestResults, read_test_results
from s3_helper import S3Helper
from ssh import SSHKey
from stopwatch import Stopwatch
from docker_images_helper import get_docker_image, pull_image
from upload_result_helper import upload_results
NAME = "Style Check"
GIT_PREFIX = ( # All commits to remote are done as robot-clickhouse
"git -c user.email=robot-clickhouse@users.noreply.github.com "
"-c user.name=robot-clickhouse -c commit.gpgsign=false "
"-c core.sshCommand="
"'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'"
)
def process_result(
result_directory: Path,
@ -135,13 +142,16 @@ def main():
repo_path = Path(REPO_COPY)
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
reports_path = Path(REPORTS_PATH)
reports_path.mkdir(parents=True, exist_ok=True)
pr_info = PRInfo()
gh = GitHub(get_best_robot_token(), create_cache_dir=False)
commit = get_commit(gh, pr_info.sha)
if args.push:
checkout_head(pr_info)
gh = GitHub(get_best_robot_token(), create_cache_dir=False)
commit = get_commit(gh, pr_info.sha)
atexit.register(update_mergeable_check, gh, pr_info, NAME)
rerun_helper = RerunHelper(commit, NAME)
@ -153,14 +163,13 @@ def main():
code = int(state != "success")
sys.exit(code)
docker_image = get_image_with_version(reports_path, "clickhouse/style-test")
s3_helper = S3Helper()
IMAGE_NAME = "clickhouse/style-test"
image = pull_image(get_docker_image(IMAGE_NAME))
cmd = (
f"docker run -u $(id -u ${{USER}}):$(id -g ${{USER}}) --cap-add=SYS_PTRACE "
f"--volume={repo_path}:/ClickHouse --volume={temp_path}:/test_output "
f"{image}"
f"{docker_image}"
)
logging.info("Is going to run the command: %s", cmd)
@ -179,9 +188,7 @@ def main():
s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, state, report_url, description, NAME, pr_info, dump_to_file=True
)
post_commit_status(commit, state, report_url, description, NAME, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -38,6 +38,35 @@ class TestDigests(unittest.TestCase):
dh._digest_file(self.tests_dir / "symlink-12", hash_tested)
self.assertEqual(hash_expected.digest(), hash_tested.digest())
def test__digest_directory(self):
hash_tested = md5()
with self.assertRaises(
AssertionError, msg="_digest_directory shouldn't work with files"
):
dh._digest_directory(self.tests_dir / "12", hash_tested)
with self.assertRaises(
AssertionError, msg="_digest_directory shouldn't work with broken links"
):
            dh._digest_directory(self.broken_link, hash_tested)
# dir1
hash_expected = md5()
hash_expected.update(_12 + _14)
dh._digest_directory(self.tests_dir / "dir1", hash_tested)
self.assertEqual(hash_expected.digest(), hash_tested.digest())
# dir2 contains 12 and 13
hash_expected = md5()
hash_expected.update(_12 + _13)
hash_tested = md5()
dh._digest_directory(self.tests_dir / "dir2", hash_tested)
self.assertEqual(hash_expected.digest(), hash_tested.digest())
# dir3 is symlink to dir2
hash_tested = md5()
dh._digest_directory(self.tests_dir / "dir3", hash_tested)
self.assertEqual(hash_expected.digest(), hash_tested.digest())
def test_digest_path(self):
# test broken link does nothing
self.assertEqual(
@ -76,7 +105,7 @@ class TestDigests(unittest.TestCase):
hash_expected = md5()
hash_expected.update(_12 * 2 + _14 + (_12 + _13) * 2 + _12)
self.assertEqual(
hash_expected.hexdigest(), dh.digest_path(self.tests_dir).hexdigest()
hash_expected.digest(), dh.digest_path(self.tests_dir).digest()
)
def test_digest_paths(self):
@ -90,9 +119,19 @@ class TestDigests(unittest.TestCase):
hash_unordered = dh.digest_paths(
(self.tests_dir / d for d in ("dir3", "dir1", "dir2"))
)
self.assertEqual(hash_ordered.digest(), hash_unordered.digest())
self.assertNotEqual(hash_ordered.digest(), hash_unordered.digest())
self.assertNotEqual(hash_ordered.digest(), hash_reversed.digest())
self.assertNotEqual(hash_unordered.digest(), hash_reversed.digest())
def test_digest_consistent_paths(self):
        # test that the order of paths does not matter
hash_ordered = dh.digest_consistent_paths(
(self.tests_dir / d for d in ("dir1", "dir2", "dir3"))
)
hash_reversed = dh.digest_consistent_paths(
(self.tests_dir / d for d in ("dir3", "dir2", "dir1"))
)
self.assertEqual(hash_ordered.digest(), hash_reversed.digest())
self.assertEqual(hash_unordered.digest(), hash_reversed.digest())
@classmethod
def setUpClass(cls):

View File

@ -2,6 +2,13 @@
import unittest
from unittest.mock import patch, MagicMock
from pathlib import Path
from env_helper import GITHUB_RUN_URL
from pr_info import PRInfo
from report import TestResult
import docker_images_check as di
from docker_images_helper import get_images_dict
from version_helper import get_version_from_string
import docker_server as ds
@ -9,6 +16,257 @@ import docker_server as ds
# di.logging.basicConfig(level=di.logging.INFO)
class TestDockerImageCheck(unittest.TestCase):
def test_get_changed_docker_images(self):
pr_info = PRInfo(PRInfo.default_event.copy())
pr_info.changed_files = {
"docker/test/stateless",
"docker/test/base",
"docker/docs/builder",
}
images = sorted(
list(
di.get_changed_docker_images(
pr_info,
get_images_dict(
Path(__file__).parent,
Path("tests/docker_images_for_tests.json"),
),
)
)
)
self.maxDiff = None
expected = sorted(
[
di.DockerImage("docker/test/base", "clickhouse/test-base", False),
di.DockerImage("docker/docs/builder", "clickhouse/docs-builder", True),
di.DockerImage(
"docker/test/sqltest",
"clickhouse/sqltest",
False,
"clickhouse/test-base", # type: ignore
),
di.DockerImage(
"docker/test/stateless",
"clickhouse/stateless-test",
False,
"clickhouse/test-base", # type: ignore
),
di.DockerImage(
"docker/test/integration/base",
"clickhouse/integration-test",
False,
"clickhouse/test-base", # type: ignore
),
di.DockerImage(
"docker/test/fuzzer",
"clickhouse/fuzzer",
False,
"clickhouse/test-base", # type: ignore
),
di.DockerImage(
"docker/test/keeper-jepsen",
"clickhouse/keeper-jepsen-test",
False,
"clickhouse/test-base", # type: ignore
),
di.DockerImage(
"docker/docs/check",
"clickhouse/docs-check",
False,
"clickhouse/docs-builder", # type: ignore
),
di.DockerImage(
"docker/docs/release",
"clickhouse/docs-release",
False,
"clickhouse/docs-builder", # type: ignore
),
di.DockerImage(
"docker/test/stateful",
"clickhouse/stateful-test",
False,
"clickhouse/stateless-test", # type: ignore
),
di.DockerImage(
"docker/test/unit",
"clickhouse/unit-test",
False,
"clickhouse/stateless-test", # type: ignore
),
di.DockerImage(
"docker/test/stress",
"clickhouse/stress-test",
False,
"clickhouse/stateful-test", # type: ignore
),
]
)
self.assertEqual(images, expected)
def test_gen_version(self):
pr_info = PRInfo(PRInfo.default_event.copy())
pr_info.base_ref = "anything-else"
versions, result_version = di.gen_versions(pr_info, None)
self.assertEqual(versions, ["0", "0-HEAD"])
self.assertEqual(result_version, "0-HEAD")
pr_info.base_ref = "master"
versions, result_version = di.gen_versions(pr_info, None)
self.assertEqual(versions, ["latest", "0", "0-HEAD"])
self.assertEqual(result_version, "0-HEAD")
versions, result_version = di.gen_versions(pr_info, "suffix")
self.assertEqual(versions, ["latest-suffix", "0-suffix", "0-HEAD-suffix"])
self.assertEqual(result_version, versions)
pr_info.number = 1
versions, result_version = di.gen_versions(pr_info, None)
self.assertEqual(versions, ["1", "1-HEAD"])
self.assertEqual(result_version, "1-HEAD")
@patch("docker_images_check.TeePopen")
@patch("platform.machine")
def test_build_and_push_one_image(self, mock_machine, mock_popen):
mock_popen.return_value.__enter__.return_value.wait.return_value = 0
image = di.DockerImage("path", "name", False, gh_repo="")
result, _ = di.build_and_push_one_image(image, "version", [], True, True)
mock_popen.assert_called_once()
mock_machine.assert_not_called()
self.assertIn(
f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} "
"--build-arg FROM_TAG=version "
f"--build-arg CACHE_INVALIDATOR={GITHUB_RUN_URL} "
"--tag name:version --cache-from type=registry,ref=name:version "
"--cache-from type=registry,ref=name:latest "
"--cache-to type=inline,mode=max --push --progress plain path",
mock_popen.call_args.args,
)
self.assertTrue(result)
mock_popen.reset_mock()
mock_machine.reset_mock()
mock_popen.return_value.__enter__.return_value.wait.return_value = 0
result, _ = di.build_and_push_one_image(image, "version2", [], False, True)
mock_popen.assert_called_once()
mock_machine.assert_not_called()
self.assertIn(
f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} "
"--build-arg FROM_TAG=version2 "
f"--build-arg CACHE_INVALIDATOR={GITHUB_RUN_URL} "
"--tag name:version2 --cache-from type=registry,ref=name:version2 "
"--cache-from type=registry,ref=name:latest "
"--cache-to type=inline,mode=max --progress plain path",
mock_popen.call_args.args,
)
self.assertTrue(result)
mock_popen.reset_mock()
mock_machine.reset_mock()
mock_popen.return_value.__enter__.return_value.wait.return_value = 1
result, _ = di.build_and_push_one_image(image, "version2", [], False, False)
mock_popen.assert_called_once()
mock_machine.assert_not_called()
self.assertIn(
f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} "
f"--build-arg CACHE_INVALIDATOR={GITHUB_RUN_URL} "
"--tag name:version2 --cache-from type=registry,ref=name:version2 "
"--cache-from type=registry,ref=name:latest "
"--cache-to type=inline,mode=max --progress plain path",
mock_popen.call_args.args,
)
self.assertFalse(result)
mock_popen.reset_mock()
mock_machine.reset_mock()
mock_popen.return_value.__enter__.return_value.wait.return_value = 1
result, _ = di.build_and_push_one_image(
image, "version2", ["cached-version", "another-cached"], False, False
)
mock_popen.assert_called_once()
mock_machine.assert_not_called()
self.assertIn(
f"docker buildx build --builder default --label build-url={GITHUB_RUN_URL} "
f"--build-arg CACHE_INVALIDATOR={GITHUB_RUN_URL} "
"--tag name:version2 --cache-from type=registry,ref=name:version2 "
"--cache-from type=registry,ref=name:latest "
"--cache-from type=registry,ref=name:cached-version "
"--cache-from type=registry,ref=name:another-cached "
"--cache-to type=inline,mode=max --progress plain path",
mock_popen.call_args.args,
)
self.assertFalse(result)
mock_popen.reset_mock()
mock_machine.reset_mock()
only_amd64_image = di.DockerImage("path", "name", True)
mock_popen.return_value.__enter__.return_value.wait.return_value = 0
result, _ = di.build_and_push_one_image(
only_amd64_image, "version", [], True, True
)
mock_popen.assert_called_once()
mock_machine.assert_called_once()
self.assertIn(
"docker pull ubuntu:20.04; docker tag ubuntu:20.04 name:version; "
"docker push name:version",
mock_popen.call_args.args,
)
self.assertTrue(result)
result, _ = di.build_and_push_one_image(
only_amd64_image, "version", [], False, True
)
self.assertIn(
"docker pull ubuntu:20.04; docker tag ubuntu:20.04 name:version; ",
mock_popen.call_args.args,
)
with self.assertRaises(AssertionError):
result, _ = di.build_and_push_one_image(image, "version", [""], False, True)
@patch("docker_images_check.build_and_push_one_image")
def test_process_image_with_parents(self, mock_build):
mock_build.side_effect = lambda v, w, x, y, z: (True, Path(f"{v.repo}_{w}.log"))
im1 = di.DockerImage("path1", "repo1", False)
im2 = di.DockerImage("path2", "repo2", False, im1)
im3 = di.DockerImage("path3", "repo3", False, im2)
im4 = di.DockerImage("path4", "repo4", False, im1)
        # We use a list to have a deterministic order of image builds
images = [im4, im1, im3, im2, im1]
test_results = [
di.process_image_with_parents(im, ["v1", "v2", "latest"], [], True)
for im in images
]
        # The time is random, so we check it's not None and greater than 0,
        # and then set it to 1
for results in test_results:
for result in results:
self.assertIsNotNone(result.time)
self.assertGreater(result.time, 0) # type: ignore
result.time = 1
self.maxDiff = None
expected = [
[ # repo4 -> repo1
TestResult("repo1:v1", "OK", 1, [Path("repo1_v1.log")]),
TestResult("repo1:v2", "OK", 1, [Path("repo1_v2.log")]),
TestResult("repo1:latest", "OK", 1, [Path("repo1_latest.log")]),
TestResult("repo4:v1", "OK", 1, [Path("repo4_v1.log")]),
TestResult("repo4:v2", "OK", 1, [Path("repo4_v2.log")]),
TestResult("repo4:latest", "OK", 1, [Path("repo4_latest.log")]),
],
[], # repo1 is built
[ # repo3 -> repo2 -> repo1
TestResult("repo2:v1", "OK", 1, [Path("repo2_v1.log")]),
TestResult("repo2:v2", "OK", 1, [Path("repo2_v2.log")]),
TestResult("repo2:latest", "OK", 1, [Path("repo2_latest.log")]),
TestResult("repo3:v1", "OK", 1, [Path("repo3_v1.log")]),
TestResult("repo3:v2", "OK", 1, [Path("repo3_v2.log")]),
TestResult("repo3:latest", "OK", 1, [Path("repo3_latest.log")]),
],
[], # repo2 -> repo1 are built
[], # repo1 is built
]
self.assertEqual(test_results, expected)
class TestDockerServer(unittest.TestCase):
def test_gen_tags(self):
version = get_version_from_string("22.2.2.2")

View File

@ -22,8 +22,8 @@ from commit_status_helper import (
post_commit_status,
update_mergeable_check,
)
from docker_images_helper import pull_image, get_docker_image
from env_helper import REPORT_PATH, TEMP_PATH
from docker_pull_helper import get_image_with_version
from env_helper import TEMP_PATH, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import ERROR, FAILURE, FAIL, OK, SUCCESS, TestResults, TestResult
@ -174,10 +174,7 @@ def main():
stopwatch = Stopwatch()
check_name = sys.argv[1] if len(sys.argv) > 1 else os.getenv("CHECK_NAME")
assert (
check_name
), "Check name must be provided as an input arg or in CHECK_NAME env"
check_name = sys.argv[1]
temp_path = Path(TEMP_PATH)
temp_path.mkdir(parents=True, exist_ok=True)
@ -194,9 +191,9 @@ def main():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = pull_image(get_docker_image(IMAGE_NAME))
docker_image = get_image_with_version(REPORTS_PATH, IMAGE_NAME)
download_unit_tests(check_name, REPORT_PATH, TEMP_PATH)
download_unit_tests(check_name, REPORTS_PATH, TEMP_PATH)
tests_binary = temp_path / "unit_tests_dbms"
os.chmod(tests_binary, 0o777)
@ -236,9 +233,7 @@ def main():
check_name,
)
print(f"::notice ::Report url: {report_url}")
post_commit_status(
commit, state, report_url, description, check_name, pr_info, dump_to_file=True
)
post_commit_status(commit, state, report_url, description, check_name, pr_info)
prepared_events = prepare_tests_results_for_clickhouse(
pr_info,

View File

@ -4,7 +4,6 @@
ROOT_PATH=$(git rev-parse --show-toplevel)
# FIXME: check all (or almost all) of the repo
codespell \
--skip "*generated*,*gperf*,*.bin,*.mrk*,*.idx,checksums.txt,*.dat,*.pyc,*.kate-swp,*obfuscateQueries.cpp,d3-*.js,*.min.js,*.sum,${ROOT_PATH}/utils/check-style/aspell-ignore" \
--ignore-words "${ROOT_PATH}/utils/check-style/codespell-ignore-words.list" \