ClickHouse/tests/ci/env_helper.py

#!/usr/bin/env python
import logging
import os
from os import path as p
from typing import Tuple
from build_download_helper import get_gh_api
module_dir = p.abspath(p.dirname(__file__))
git_root = p.abspath(p.join(module_dir, "..", ".."))
CI = bool(os.getenv("CI"))
TEMP_PATH = os.getenv("TEMP_PATH", p.abspath(p.join(module_dir, "./tmp")))
CACHES_PATH = os.getenv("CACHES_PATH", TEMP_PATH)
CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN")
GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "")
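# GITHUB_JOB_OVERRIDDEN, when set, takes precedence over the runner-provided
# GITHUB_JOB; outside of CI both are absent and the value falls back to "local".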
GITHUB_JOB = os.getenv("GITHUB_JOB_OVERRIDDEN", "") or os.getenv("GITHUB_JOB", "local")
GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse")
GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0")
GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com")
GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root)
GITHUB_RUN_URL = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/actions/runs/{GITHUB_RUN_ID}"
IMAGES_PATH = os.getenv("IMAGES_PATH", TEMP_PATH)
REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports")))
REPO_COPY = os.getenv("REPO_COPY", GITHUB_WORKSPACE)
RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp")))
S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds")
S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "clickhouse-test-reports")
S3_URL = os.getenv("S3_URL", "https://s3.amazonaws.com")
S3_DOWNLOAD = os.getenv("S3_DOWNLOAD", S3_URL)
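# Template for build-artifact URLs; the braced placeholders (pr_or_release,
# commit, build_name, artifact) are left for callers to fill in via str.format.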
S3_ARTIFACT_DOWNLOAD_TEMPLATE = (
    f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/"
    "{pr_or_release}/{commit}/{build_name}/{artifact}"
)
# These parameters are set only on demand, and only once
_GITHUB_JOB_ID = ""
_GITHUB_JOB_URL = ""
_GITHUB_JOB_API_URL = ""
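# The accessors below query the GitHub Actions API on first use and cache the
# result in the module-level variables above, so the lookup happens at most once.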
def GITHUB_JOB_ID() -> str:
    global _GITHUB_JOB_ID
    global _GITHUB_JOB_URL
    global _GITHUB_JOB_API_URL
    if _GITHUB_JOB_ID:
        return _GITHUB_JOB_ID
    _GITHUB_JOB_ID, _GITHUB_JOB_URL, _GITHUB_JOB_API_URL = get_job_id_url(GITHUB_JOB)
    return _GITHUB_JOB_ID


def GITHUB_JOB_URL() -> str:
    GITHUB_JOB_ID()
    return _GITHUB_JOB_URL


def GITHUB_JOB_API_URL() -> str:
    GITHUB_JOB_ID()
    return _GITHUB_JOB_API_URL


def get_job_id_url(job_name: str) -> Tuple[str, str, str]:
    job_id = ""
    job_url = ""
    job_api_url = ""
    if GITHUB_RUN_ID == "0":
        job_id = "0"
    if job_id:
        return job_id, job_url, job_api_url

    jobs = []
    page = 1
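    # Walk the run's job listing page by page (100 jobs per request) until a
    # job with an exactly matching name is found or the listing is exhausted.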
    while not job_id:
        response = get_gh_api(
            f"https://api.github.com/repos/{GITHUB_REPOSITORY}/"
            f"actions/runs/{GITHUB_RUN_ID}/jobs?per_page=100&page={page}"
        )
        page += 1
        data = response.json()
        jobs.extend(data["jobs"])
        for job in data["jobs"]:
            if job["name"] != job_name:
                continue
            job_id = job["id"]
            job_url = job["html_url"]
            job_api_url = job["url"]
            return job_id, job_url, job_api_url
        if (
            len(jobs) >= data["total_count"]  # just in case of inconsistency
            or len(data["jobs"]) == 0  # if we exceeded the number of pages
        ):
            job_id = "0"
    # FIXME: until it's here, we can't move to reusable workflows
    if not job_url:
        # This is a terrible workaround for the case of another broken part of
        # GitHub actions. For nested workflows it doesn't provide a proper job_name
        # value, but only the final one. So, for the full name
        # `OriginalJob / NestedJob / FinalJob`, job_name contains only FinalJob
        matched_jobs = []
        for job in jobs:
            nested_parts = job["name"].split(" / ")
            if len(nested_parts) <= 1:
                continue
            if nested_parts[-1] == job_name:
                matched_jobs.append(job)
        if len(matched_jobs) == 1:
            # The best case scenario
            job_id = matched_jobs[0]["id"]
            job_url = matched_jobs[0]["html_url"]
            job_api_url = matched_jobs[0]["url"]
            return job_id, job_url, job_api_url
        if matched_jobs:
            logging.error(
                "We could not get the ID and URL for the current job name %s: more "
                "than one job matches it for the nested workflows. Please refer to "
                "https://github.com/actions/runner/issues/2577",
                job_name,
            )

    return job_id, job_url, job_api_url
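# A minimal usage sketch (all values hypothetical, for illustration only) of how
# another CI script might consume these helpers:
#
#   from env_helper import GITHUB_JOB_URL, S3_ARTIFACT_DOWNLOAD_TEMPLATE
#
#   artifact_url = S3_ARTIFACT_DOWNLOAD_TEMPLATE.format(
#       pr_or_release="12345",
#       commit="abcdef0123456789",
#       build_name="package_release",
#       artifact="clickhouse-common-static.tgz",
#   )
#   print(f"job: {GITHUB_JOB_URL()}, artifact: {artifact_url}")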