Merge pull request #44883 from ClickHouse/ci-report-type

Rework CI reports to have a class and clarify the logic

Commit: 0be3dbad3c
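
The change in one picture: before this commit, the CI scripts passed test results around as bare tuples or lists such as ("test_name", "OK"), and every consumer indexed into them by position. The commit introduces a TestResult dataclass (defined in report.py, near the end of this diff) and threads it through all check scripts. A minimal sketch of the before/after shape, using only fields visible in this diff:

# Before: positional access, the shape implied by convention only
result = ("02345_some_test", "FAIL", 12.5)
name, status = result[0], result[1]

# After: named, typed fields with optional time and log metadata
from report import TestResult

result = TestResult("02345_some_test", "FAIL", time=12.5)
name, status = result.name, result.status
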
@@ -6,6 +6,8 @@ import argparse
 import csv
 
 
+# TODO: add typing and log files to the fourth column, think about launching
+# everything from the python and not bash
 def process_result(result_folder):
     status = "success"
     description = ""
@@ -7,6 +7,10 @@ import sys
 
 from github import Github
 
+from build_download_helper import get_build_name_for_check, read_build_urls
+from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
+from commit_status_helper import post_commit_status
+from docker_pull_helper import get_image_with_version
 from env_helper import (
     GITHUB_REPOSITORY,
     GITHUB_RUN_URL,

@@ -14,15 +18,12 @@ from env_helper import (
     REPO_COPY,
     TEMP_PATH,
 )
-from s3_helper import S3Helper
 from get_robot_token import get_best_robot_token
 from pr_info import PRInfo
-from build_download_helper import get_build_name_for_check, read_build_urls
-from docker_pull_helper import get_image_with_version
-from commit_status_helper import post_commit_status
-from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
-from stopwatch import Stopwatch
+from report import TestResult
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
 
 IMAGE_NAME = "clickhouse/fuzzer"
 

@@ -148,16 +149,15 @@ if __name__ == "__main__":
         status = "failure"
         description = "Task failed: $?=" + str(retcode)
 
+    test_result = TestResult(description, "OK")
     if "fail" in status:
-        test_result = [(description, "FAIL")]
-    else:
-        test_result = [(description, "OK")]
+        test_result.status = "FAIL"
 
     ch_helper = ClickHouseHelper()
 
     prepared_events = prepare_tests_results_for_clickhouse(
         pr_info,
-        test_result,
+        [test_result],
         status,
         stopwatch.duration_seconds,
         stopwatch.start_time_str,
@@ -1,18 +1,19 @@
 #!/usr/bin/env python3
 
+from typing import List, Tuple
 import argparse
 import csv
-import itertools
 import logging
 import os
 
 from github import Github
 
-from s3_helper import S3Helper
+from commit_status_helper import post_commit_status
 from get_robot_token import get_best_robot_token
 from pr_info import PRInfo
+from report import TestResults, TestResult
+from s3_helper import S3Helper
 from upload_result_helper import upload_results
-from commit_status_helper import post_commit_status
 
 
 def parse_args():

@@ -21,11 +22,9 @@ def parse_args():
     return parser.parse_args()
 
 
-def post_commit_status_from_file(file_path):
-    res = []
+def post_commit_status_from_file(file_path: str) -> List[str]:
     with open(file_path, "r", encoding="utf-8") as f:
-        fin = csv.reader(f, delimiter="\t")
-        res = list(itertools.islice(fin, 1))
+        res = list(csv.reader(f, delimiter="\t"))
     if len(res) < 1:
         raise Exception(f'Can\'t read from "{file_path}"')
     if len(res[0]) != 3:

@@ -33,22 +32,22 @@ def post_commit_status_from_file(file_path):
     return res[0]
 
 
-def process_result(file_path):
-    test_results = []
+def process_result(file_path: str) -> Tuple[bool, TestResults]:
+    test_results = []  # type: TestResults
     state, report_url, description = post_commit_status_from_file(file_path)
     prefix = os.path.basename(os.path.dirname(file_path))
     is_ok = state == "success"
     if is_ok and report_url == "null":
-        return is_ok, None
+        return is_ok, test_results
 
     status = f'OK: Bug reproduced (<a href="{report_url}">Report</a>)'
     if not is_ok:
         status = f'Bug is not reproduced (<a href="{report_url}">Report</a>)'
-    test_results.append([f"{prefix}: {description}", status])
+    test_results.append(TestResult(f"{prefix}: {description}", status))
     return is_ok, test_results
 
 
-def process_all_results(file_paths):
+def process_all_results(file_paths: str) -> Tuple[bool, TestResults]:
     any_ok = False
     all_results = []
     for status_path in file_paths:
@@ -10,13 +10,14 @@ from typing import Dict, List, Tuple
 from github import Github
 
 from env_helper import (
+    GITHUB_JOB_URL,
     GITHUB_REPOSITORY,
     GITHUB_RUN_URL,
     GITHUB_SERVER_URL,
     REPORTS_PATH,
     TEMP_PATH,
 )
-from report import create_build_html_report
+from report import create_build_html_report, BuildResult, BuildResults
 from s3_helper import S3Helper
 from get_robot_token import get_best_robot_token
 from pr_info import NeedsDataType, PRInfo

@@ -31,24 +32,6 @@ from rerun_helper import RerunHelper
 NEEDS_DATA_PATH = os.getenv("NEEDS_DATA_PATH", "")
 
 
-class BuildResult:
-    def __init__(
-        self,
-        compiler,
-        build_type,
-        sanitizer,
-        status,
-        elapsed_seconds,
-        with_coverage,
-    ):
-        self.compiler = compiler
-        self.build_type = build_type
-        self.sanitizer = sanitizer
-        self.status = status
-        self.elapsed_seconds = elapsed_seconds
-        self.with_coverage = with_coverage
-
-
 def group_by_artifacts(build_urls: List[str]) -> Dict[str, List[str]]:
     groups = {
         "apk": [],

@@ -81,7 +64,7 @@ def group_by_artifacts(build_urls: List[str]) -> Dict[str, List[str]]:
 
 def get_failed_report(
     job_name: str,
-) -> Tuple[List[BuildResult], List[List[str]], List[str]]:
+) -> Tuple[BuildResults, List[List[str]], List[str]]:
     message = f"{job_name} failed"
     build_result = BuildResult(
         compiler="unknown",

@@ -89,14 +72,13 @@ def get_failed_report(
         sanitizer="unknown",
         status=message,
         elapsed_seconds=0,
-        with_coverage=False,
     )
     return [build_result], [[""]], [GITHUB_RUN_URL]
 
 
 def process_report(
     build_report: dict,
-) -> Tuple[List[BuildResult], List[List[str]], List[str]]:
+) -> Tuple[BuildResults, List[List[str]], List[str]]:
     build_config = build_report["build_config"]
     build_result = BuildResult(
         compiler=build_config["compiler"],

@@ -104,7 +86,6 @@ def process_report(
         sanitizer=build_config["sanitizer"],
         status="success" if build_report["status"] else "failure",
         elapsed_seconds=build_report["elapsed_seconds"],
-        with_coverage=False,
     )
     build_results = []
     build_urls = []

@@ -207,9 +188,9 @@ def main():
     logging.info("Got exactly %s builds", len(builds_report_map))
 
     # Group build artifacts by groups
-    build_results = []  # type: List[BuildResult]
-    build_artifacts = []  #
-    build_logs = []
+    build_results = []  # type: BuildResults
+    build_artifacts = []  # type: List[List[str]]
+    build_logs = []  # type: List[str]
 
     for build_report in build_reports:
         _build_results, build_artifacts_url, build_logs_url = process_report(

@@ -244,7 +225,7 @@ def main():
         branch_name = f"PR #{pr_info.number}"
         branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/pull/{pr_info.number}"
     commit_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commit/{pr_info.sha}"
-    task_url = GITHUB_RUN_URL
+    task_url = GITHUB_JOB_URL()
     report = create_build_html_report(
         build_check_name,
         build_results,
@@ -1,10 +1,14 @@
 #!/usr/bin/env python3
-import time
-import logging
+from typing import List
 import json
+import logging
+import time
 
 import requests  # type: ignore
 
 from get_robot_token import get_parameter_from_ssm
+from pr_info import PRInfo
+from report import TestResults
 
+
 class InsertException(Exception):

@@ -129,14 +133,14 @@ class ClickHouseHelper:
 
 
 def prepare_tests_results_for_clickhouse(
-    pr_info,
-    test_results,
-    check_status,
-    check_duration,
-    check_start_time,
-    report_url,
-    check_name,
-):
+    pr_info: PRInfo,
+    test_results: TestResults,
+    check_status: str,
+    check_duration: float,
+    check_start_time: str,
+    report_url: str,
+    check_name: str,
+) -> List[dict]:
 
     pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
     base_ref = "master"

@@ -172,13 +176,11 @@ def prepare_tests_results_for_clickhouse(
     result = [common_properties]
     for test_result in test_results:
         current_row = common_properties.copy()
-        test_name = test_result[0]
-        test_status = test_result[1]
+        test_name = test_result.name
+        test_status = test_result.status
 
-        test_time = 0
-        if len(test_result) > 2 and test_result[2]:
-            test_time = test_result[2]
-        current_row["test_duration_ms"] = int(float(test_time) * 1000)
+        test_time = test_result.time or 0
+        current_row["test_duration_ms"] = int(test_time * 1000)
         current_row["test_name"] = test_name
         current_row["test_status"] = test_status
         result.append(current_row)
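
With the typed signature above, each TestResult becomes one row for the checks table, and only three keys vary per test. A small sketch of the loop's effect for a single result (common_properties is elided; the key names come straight from the hunk above):

from report import TestResult

test_result = TestResult("query_fuzzer", "OK")  # time defaults to None

current_row = {}  # in the real code: common_properties.copy()
current_row["test_duration_ms"] = int((test_result.time or 0) * 1000)  # None -> 0
current_row["test_name"] = test_result.name
current_row["test_status"] = test_result.status
assert current_row["test_duration_ms"] == 0
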
@@ -186,7 +188,9 @@ def prepare_tests_results_for_clickhouse(
     return result
 
 
-def mark_flaky_tests(clickhouse_helper, check_name, test_results):
+def mark_flaky_tests(
+    clickhouse_helper: ClickHouseHelper, check_name: str, test_results: TestResults
+) -> None:
     try:
         query = f"""SELECT DISTINCT test_name
 FROM checks

@@ -202,7 +206,7 @@ WHERE
         logging.info("Found flaky tests: %s", ", ".join(master_failed_tests))
 
         for test_result in test_results:
-            if test_result[1] == "FAIL" and test_result[0] in master_failed_tests:
-                test_result[1] = "FLAKY"
+            if test_result.status == "FAIL" and test_result.name in master_failed_tests:
+                test_result.status = "FLAKY"
     except Exception as ex:
         logging.error("Exception happened during flaky tests fetch %s", ex)
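
Worth noting: mark_flaky_tests rewrites statuses in place, which previously relied on results being mutable lists indexed by position. With the dataclass the same mutation is explicit and named. In isolation:

from report import TestResult

test_results = [TestResult("test_a", "FAIL"), TestResult("test_b", "OK")]
master_failed_tests = {"test_a"}

for test_result in test_results:
    # the same rewrite as above, minus the ClickHouse query that finds flaky tests
    if test_result.status == "FAIL" and test_result.name in master_failed_tests:
        test_result.status = "FLAKY"

assert test_results[0].status == "FLAKY"
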
@@ -7,6 +7,8 @@ import logging
 
 from github import Github
 
+from commit_status_helper import post_commit_status
+from docker_pull_helper import get_image_with_version
 from env_helper import (
     IMAGES_PATH,
     REPO_COPY,

@@ -14,10 +16,9 @@ from env_helper import (
     S3_TEST_REPORTS_BUCKET,
     TEMP_PATH,
 )
-from commit_status_helper import post_commit_status
-from docker_pull_helper import get_image_with_version
 from get_robot_token import get_best_robot_token
 from pr_info import PRInfo
+from report import TestResult
 from s3_helper import S3Helper
 from stopwatch import Stopwatch
 from tee_popen import TeePopen

@@ -80,9 +81,9 @@ if __name__ == "__main__":
         "HTML report</a>"
     )
 
-    test_results = [(index_html, "Look at the report")]
+    test_result = TestResult(index_html, "Look at the report")
 
-    report_url = upload_results(s3_helper, 0, pr_info.sha, test_results, [], NAME)
+    report_url = upload_results(s3_helper, 0, pr_info.sha, [test_result], [], NAME)
 
     print(f"::notice ::Report url: {report_url}")
 
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 
 from distutils.version import StrictVersion
+from typing import List, Tuple
 import logging
 import os
 import subprocess

@@ -8,21 +9,22 @@ import sys
 
 from github import Github
 
-from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
-from s3_helper import S3Helper
-from get_robot_token import get_best_robot_token
-from pr_info import PRInfo
 from build_download_helper import download_builds_filter
-from upload_result_helper import upload_results
-from docker_pull_helper import get_images_with_versions
-from commit_status_helper import post_commit_status
 from clickhouse_helper import (
     ClickHouseHelper,
     mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
-from stopwatch import Stopwatch
+from commit_status_helper import post_commit_status
+from docker_pull_helper import get_images_with_versions
+from env_helper import TEMP_PATH, REPORTS_PATH
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from report import TestResults, TestResult
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
+from upload_result_helper import upload_results
 
 IMAGE_UBUNTU = "clickhouse/test-old-ubuntu"
 IMAGE_CENTOS = "clickhouse/test-old-centos"

@@ -31,18 +33,18 @@ DOWNLOAD_RETRIES_COUNT = 5
 CHECK_NAME = "Compatibility check"
 
 
-def process_os_check(log_path):
+def process_os_check(log_path: str) -> TestResult:
     name = os.path.basename(log_path)
     with open(log_path, "r") as log:
         line = log.read().split("\n")[0].strip()
         if line != "OK":
-            return (name, "FAIL")
+            return TestResult(name, "FAIL")
         else:
-            return (name, "OK")
+            return TestResult(name, "OK")
 
 
-def process_glibc_check(log_path):
-    bad_lines = []
+def process_glibc_check(log_path: str) -> TestResults:
+    test_results = []  # type: TestResults
     with open(log_path, "r") as log:
         for line in log:
             if line.strip():

@@ -50,32 +52,36 @@ def process_glibc_check(log_path):
                 symbol_with_glibc = columns[-2]  # sysconf@GLIBC_2.2.5
                 _, version = symbol_with_glibc.split("@GLIBC_")
                 if version == "PRIVATE":
-                    bad_lines.append((symbol_with_glibc, "FAIL"))
+                    test_results.append(TestResult(symbol_with_glibc, "FAIL"))
                 elif StrictVersion(version) > MAX_GLIBC_VERSION:
-                    bad_lines.append((symbol_with_glibc, "FAIL"))
-    if not bad_lines:
-        bad_lines.append(("glibc check", "OK"))
-    return bad_lines
+                    test_results.append(TestResult(symbol_with_glibc, "FAIL"))
+    if not test_results:
+        test_results.append(TestResult("glibc check", "OK"))
+    return test_results
 
 
-def process_result(result_folder, server_log_folder):
-    summary = process_glibc_check(os.path.join(result_folder, "glibc.log"))
+def process_result(
+    result_folder: str, server_log_folder: str
+) -> Tuple[str, str, TestResults, List[str]]:
+    test_results = process_glibc_check(os.path.join(result_folder, "glibc.log"))
 
     status = "success"
     description = "Compatibility check passed"
-    if len(summary) > 1 or summary[0][1] != "OK":
+    if len(test_results) > 1 or test_results[0].status != "OK":
         status = "failure"
         description = "glibc check failed"
 
     if status == "success":
         for operating_system in ("ubuntu:12.04", "centos:5"):
-            result = process_os_check(os.path.join(result_folder, operating_system))
-            if result[1] != "OK":
+            test_result = process_os_check(
+                os.path.join(result_folder, operating_system)
+            )
+            if test_result.status != "OK":
                 status = "failure"
                 description = f"Old {operating_system} failed"
-                summary += [result]
+                test_results += [test_result]
                 break
-            summary += [result]
+            test_results += [test_result]
 
     server_log_path = os.path.join(server_log_folder, "clickhouse-server.log")
     stderr_log_path = os.path.join(server_log_folder, "stderr.log")
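
For orientation: process_glibc_check scans symbol listings where the second-to-last column looks like sysconf@GLIBC_2.2.5, and flags anything bound to GLIBC_PRIVATE or newer than MAX_GLIBC_VERSION. A standalone sketch of the version test; the 2.4 threshold is only an illustrative stand-in for the constant defined elsewhere in this file:

from distutils.version import StrictVersion

MAX_GLIBC_VERSION = StrictVersion("2.4")  # illustrative value, not the real limit

symbol_with_glibc = "sysconf@GLIBC_2.2.5"
_, version = symbol_with_glibc.split("@GLIBC_")
assert version != "PRIVATE"
assert not StrictVersion(version) > MAX_GLIBC_VERSION  # 2.2.5 is acceptable
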
@@ -90,7 +96,7 @@ def process_result(result_folder, server_log_folder):
     if os.path.exists(client_stderr_log_path):
         result_logs.append(client_stderr_log_path)
 
-    return status, description, summary, result_logs
+    return status, description, test_results, result_logs
 
 
 def get_run_commands(

@@ -109,13 +115,12 @@ def get_run_commands(
     ]
 
 
-if __name__ == "__main__":
+def main():
     logging.basicConfig(level=logging.INFO)
 
     stopwatch = Stopwatch()
 
     temp_path = TEMP_PATH
-    repo_path = REPO_COPY
     reports_path = REPORTS_PATH
 
     pr_info = PRInfo()

@@ -201,5 +206,9 @@ if __name__ == "__main__":
 
     ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
 
-    if state == "error":
+    if state == "failure":
         sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -8,6 +8,7 @@ import shutil
 import subprocess
 import time
 import sys
+from pathlib import Path
 from typing import Any, Dict, List, Optional, Set, Tuple, Union
 
 from github import Github

@@ -17,6 +18,7 @@ from commit_status_helper import post_commit_status
 from env_helper import GITHUB_WORKSPACE, RUNNER_TEMP, GITHUB_RUN_URL
 from get_robot_token import get_best_robot_token, get_parameter_from_ssm
 from pr_info import PRInfo
+from report import TestResults, TestResult
 from s3_helper import S3Helper
 from stopwatch import Stopwatch
 from upload_result_helper import upload_results

@@ -182,11 +184,12 @@ def build_and_push_dummy_image(
     image: DockerImage,
     version_string: str,
     push: bool,
-) -> Tuple[bool, str]:
+) -> Tuple[bool, Path]:
     dummy_source = "ubuntu:20.04"
     logging.info("Building docker image %s as %s", image.repo, dummy_source)
-    build_log = os.path.join(
-        TEMP_PATH, f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}"
+    build_log = (
+        Path(TEMP_PATH)
+        / f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}.log"
     )
     with open(build_log, "wb") as bl:
         cmd = (

@@ -213,7 +216,7 @@ def build_and_push_one_image(
     additional_cache: str,
     push: bool,
     child: bool,
-) -> Tuple[bool, str]:
+) -> Tuple[bool, Path]:
     if image.only_amd64 and platform.machine() not in ["amd64", "x86_64"]:
         return build_and_push_dummy_image(image, version_string, push)
     logging.info(

@@ -222,8 +225,9 @@ def build_and_push_one_image(
         version_string,
         image.full_path,
     )
-    build_log = os.path.join(
-        TEMP_PATH, f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}"
+    build_log = (
+        Path(TEMP_PATH)
+        / f"build_and_push_log_{image.repo.replace('/', '_')}_{version_string}.log"
     )
     push_arg = ""
     if push:

@@ -273,27 +277,42 @@ def process_single_image(
     additional_cache: str,
     push: bool,
     child: bool,
-) -> List[Tuple[str, str, str]]:
+) -> TestResults:
     logging.info("Image will be pushed with versions %s", ", ".join(versions))
-    result = []
+    results = []  # type: TestResults
     for ver in versions:
+        stopwatch = Stopwatch()
        for i in range(5):
            success, build_log = build_and_push_one_image(
                image, ver, additional_cache, push, child
            )
            if success:
-                result.append((image.repo + ":" + ver, build_log, "OK"))
+                results.append(
+                    TestResult(
+                        image.repo + ":" + ver,
+                        "OK",
+                        stopwatch.duration_seconds,
+                        [build_log],
+                    )
+                )
                break
            logging.info(
                "Got error will retry %s time and sleep for %s seconds", i, i * 5
            )
            time.sleep(i * 5)
        else:
-            result.append((image.repo + ":" + ver, build_log, "FAIL"))
+            results.append(
+                TestResult(
+                    image.repo + ":" + ver,
+                    "FAIL",
+                    stopwatch.duration_seconds,
+                    [build_log],
+                )
+            )
 
     logging.info("Processing finished")
     image.built = True
-    return result
+    return results
 
 
 def process_image_with_parents(
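
Two pieces of metadata the tuple version never carried now ride along on each result: a per-version Stopwatch duration and the build log path. A sketch of the success branch with build_and_push_one_image stubbed out:

from pathlib import Path
from report import TestResult
from stopwatch import Stopwatch

stopwatch = Stopwatch()
# stand-in for: success, build_log = build_and_push_one_image(...)
success, build_log = True, Path("repo1_v1.log")
if success:
    result = TestResult("repo1:v1", "OK", stopwatch.duration_seconds, [build_log])
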
@@ -302,41 +321,19 @@ def process_image_with_parents(
     additional_cache: str,
     push: bool,
     child: bool = False,
-) -> List[Tuple[str, str, str]]:
-    result = []  # type: List[Tuple[str,str,str]]
+) -> TestResults:
+    results = []  # type: TestResults
     if image.built:
-        return result
+        return results
 
     if image.parent is not None:
-        result += process_image_with_parents(
+        results += process_image_with_parents(
             image.parent, versions, additional_cache, push, False
         )
         child = True
 
-    result += process_single_image(image, versions, additional_cache, push, child)
-    return result
-
-
-def process_test_results(
-    s3_client: S3Helper, test_results: List[Tuple[str, str, str]], s3_path_prefix: str
-) -> Tuple[str, List[Tuple[str, str]]]:
-    overall_status = "success"
-    processed_test_results = []
-    for image, build_log, status in test_results:
-        if status != "OK":
-            overall_status = "failure"
-        url_part = ""
-        if build_log is not None and os.path.exists(build_log):
-            build_url = s3_client.upload_test_report_to_s3(
-                build_log, s3_path_prefix + "/" + os.path.basename(build_log)
-            )
-            url_part += f'<a href="{build_url}">build_log</a>'
-        if url_part:
-            test_name = image + " (" + url_part + ")"
-        else:
-            test_name = image
-        processed_test_results.append((test_name, status))
-    return overall_status, processed_test_results
+    results += process_single_image(image, versions, additional_cache, push, child)
+    return results
 
 
 def parse_args() -> argparse.Namespace:

@@ -440,7 +437,7 @@ def main():
     image_versions, result_version = gen_versions(pr_info, args.suffix)
 
     result_images = {}
-    images_processing_result = []
+    test_results = []  # type: TestResults
     additional_cache = ""
     if pr_info.release_pr or pr_info.merged_pr:
         additional_cache = str(pr_info.release_pr or pr_info.merged_pr)

@@ -448,7 +445,7 @@ def main():
     for image in changed_images:
         # If we are in backport PR, then pr_info.release_pr is defined
         # We use it as tag to reduce rebuilding time
-        images_processing_result += process_image_with_parents(
+        test_results += process_image_with_parents(
             image, image_versions, additional_cache, args.push
         )
         result_images[image.repo] = result_version

@@ -466,12 +463,9 @@ def main():
 
     s3_helper = S3Helper()
 
-    s3_path_prefix = (
-        str(pr_info.number) + "/" + pr_info.sha + "/" + NAME.lower().replace(" ", "_")
-    )
-    status, test_results = process_test_results(
-        s3_helper, images_processing_result, s3_path_prefix
-    )
+    status = "success"
+    if [r for r in test_results if r.status != "OK"]:
+        status = "failure"
 
     url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME)
 

@@ -495,7 +489,7 @@ def main():
     ch_helper = ClickHouseHelper()
     ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
 
-    if status == "error":
+    if status == "failure":
         sys.exit(1)
 
 
@@ -14,6 +14,7 @@ from commit_status_helper import post_commit_status
 from env_helper import RUNNER_TEMP
 from get_robot_token import get_best_robot_token, get_parameter_from_ssm
 from pr_info import PRInfo
+from report import TestResults, TestResult
 from s3_helper import S3Helper
 from stopwatch import Stopwatch
 from upload_result_helper import upload_results

@@ -189,11 +190,11 @@ def main():
     merged = merge_images(to_merge)
 
     status = "success"
-    test_results = []  # type: List[Tuple[str, str]]
+    test_results = []  # type: TestResults
     for image, versions in merged.items():
         for tags in versions:
             manifest, test_result = create_manifest(image, tags, args.push)
-            test_results.append((manifest, test_result))
+            test_results.append(TestResult(manifest, test_result))
             if test_result != "OK":
                 status = "failure"
 
@@ -7,8 +7,9 @@ import logging
 import subprocess
 import sys
 import time
+from pathlib import Path
 from os import path as p, makedirs
-from typing import List, Tuple
+from typing import List
 
 from github import Github
 

@@ -20,8 +21,10 @@ from env_helper import CI, GITHUB_RUN_URL, RUNNER_TEMP, S3_BUILDS_BUCKET, S3_DOWNLOAD
 from get_robot_token import get_best_robot_token, get_parameter_from_ssm
 from git_helper import Git
 from pr_info import PRInfo
+from report import TestResults, TestResult
 from s3_helper import S3Helper
 from stopwatch import Stopwatch
+from tee_popen import TeePopen
 from upload_result_helper import upload_results
 from version_helper import (
     ClickHouseVersion,

@@ -116,7 +119,7 @@ def parse_args() -> argparse.Namespace:
     return parser.parse_args()
 
 
-def retry_popen(cmd: str) -> int:
+def retry_popen(cmd: str, log_file: Path) -> int:
     max_retries = 5
     for retry in range(max_retries):
         # From time to time docker build may failed. Curl issues, or even push

@@ -129,18 +132,14 @@ def retry_popen(cmd: str) -> int:
                 cmd,
             )
             time.sleep(progressive_sleep)
-        with subprocess.Popen(
+        with TeePopen(
             cmd,
-            shell=True,
-            stderr=subprocess.STDOUT,
-            stdout=subprocess.PIPE,
-            universal_newlines=True,
+            log_file=log_file,
         ) as process:
-            for line in process.stdout:  # type: ignore
-                print(line, end="")
             retcode = process.wait()
             if retcode == 0:
                 return 0
 
     return retcode
 
 
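
retry_popen now takes its log destination explicitly and delegates output capture to TeePopen instead of draining process.stdout by hand. A hedged usage sketch; the Path("/dev/null") form mirrors the manifest-push call later in this file, and the log path here is purely illustrative:

from pathlib import Path

log_file = Path("/tmp") / "docker_build_amd64.log"  # hypothetical path
if retry_popen("docker buildx build ...", log_file) != 0:
    print(f"build failed, full output kept in {log_file}")

# when the output is not worth keeping, a throwaway file works too
retry_popen("docker manifest create ...", Path("/dev/null"))
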
@@ -235,8 +234,8 @@ def build_and_push_image(
     os: str,
     tag: str,
     version: ClickHouseVersion,
-) -> List[Tuple[str, str]]:
-    result = []
+) -> TestResults:
+    result = []  # type: TestResults
     if os != "ubuntu":
         tag += f"-{os}"
     init_args = ["docker", "buildx", "build", "--build-arg BUILDKIT_INLINE_CACHE=1"]

@@ -250,7 +249,9 @@ def build_and_push_image(
     # `docker buildx build --load` does not support multiple images currently
     # images must be built separately and merged together with `docker manifest`
     digests = []
+    multiplatform_sw = Stopwatch()
     for arch in BUCKETS:
+        single_sw = Stopwatch()
         arch_tag = f"{tag}-{arch}"
         metadata_path = p.join(TEMP_PATH, arch_tag)
         dockerfile = p.join(image.full_path, f"Dockerfile.{os}")

@@ -269,10 +270,25 @@ def build_and_push_image(
         )
         cmd = " ".join(cmd_args)
         logging.info("Building image %s:%s for arch %s: %s", image.repo, tag, arch, cmd)
-        if retry_popen(cmd) != 0:
-            result.append((f"{image.repo}:{tag}-{arch}", "FAIL"))
+        log_file = Path(TEMP_PATH) / f"{image.repo.replace('/', '__')}:{tag}-{arch}.log"
+        if retry_popen(cmd, log_file) != 0:
+            result.append(
+                TestResult(
+                    f"{image.repo}:{tag}-{arch}",
+                    "FAIL",
+                    single_sw.duration_seconds,
+                    [log_file],
+                )
+            )
             return result
-        result.append((f"{image.repo}:{tag}-{arch}", "OK"))
+        result.append(
+            TestResult(
+                f"{image.repo}:{tag}-{arch}",
+                "OK",
+                single_sw.duration_seconds,
+                [log_file],
+            )
+        )
         with open(metadata_path, "rb") as m:
             metadata = json.load(m)
             digests.append(metadata["containerimage.digest"])

@@ -282,9 +298,16 @@ def build_and_push_image(
             f"--tag {image.repo}:{tag} {' '.join(digests)}"
         )
         logging.info("Pushing merged %s:%s image: %s", image.repo, tag, cmd)
-        if retry_popen(cmd) != 0:
-            result.append((f"{image.repo}:{tag}", "FAIL"))
+        if retry_popen(cmd, Path("/dev/null")) != 0:
+            result.append(
+                TestResult(
+                    f"{image.repo}:{tag}", "FAIL", multiplatform_sw.duration_seconds
+                )
+            )
             return result
+        result.append(
+            TestResult(f"{image.repo}:{tag}", "OK", multiplatform_sw.duration_seconds)
+        )
     else:
         logging.info(
             "Merging is available only on push, separate %s images are created",

@@ -323,7 +346,7 @@ def main():
 
     logging.info("Following tags will be created: %s", ", ".join(tags))
     status = "success"
-    test_results = []  # type: List[Tuple[str, str]]
+    test_results = []  # type: TestResults
     for os in args.os:
         for tag in tags:
             test_results.extend(

@@ -331,7 +354,7 @@ def main():
                     image, args.push, args.bucket_prefix, os, tag, args.version
                 )
             )
-            if test_results[-1][1] != "OK":
+            if test_results[-1].status != "OK":
                 status = "failure"
 
     pr_info = pr_info or PRInfo()
@@ -3,9 +3,11 @@
 import os
 import unittest
 from unittest.mock import patch, MagicMock
+from pathlib import Path
 
 from env_helper import GITHUB_RUN_URL
 from pr_info import PRInfo
+from report import TestResult
 import docker_images_check as di
 
 with patch("git_helper.Git"):

@@ -223,40 +225,48 @@ class TestDockerImageCheck(unittest.TestCase):
 
     @patch("docker_images_check.build_and_push_one_image")
     def test_process_image_with_parents(self, mock_build):
-        mock_build.side_effect = lambda v, w, x, y, z: (True, f"{v.repo}_{w}.log")
+        mock_build.side_effect = lambda v, w, x, y, z: (True, Path(f"{v.repo}_{w}.log"))
         im1 = di.DockerImage("path1", "repo1", False)
         im2 = di.DockerImage("path2", "repo2", False, im1)
         im3 = di.DockerImage("path3", "repo3", False, im2)
         im4 = di.DockerImage("path4", "repo4", False, im1)
         # We use list to have determined order of image builgings
         images = [im4, im1, im3, im2, im1]
-        results = [
+        test_results = [
             di.process_image_with_parents(im, ["v1", "v2", "latest"], "", True)
             for im in images
         ]
+        # The time is random, so we check it's not None and greater than 0,
+        # and then set to 1
+        for results in test_results:
+            for result in results:
+                self.assertIsNotNone(result.time)
+                self.assertGreater(result.time, 0)  # type: ignore
+                result.time = 1
 
         self.maxDiff = None
         expected = [
             [  # repo4 -> repo1
-                ("repo1:v1", "repo1_v1.log", "OK"),
-                ("repo1:v2", "repo1_v2.log", "OK"),
-                ("repo1:latest", "repo1_latest.log", "OK"),
-                ("repo4:v1", "repo4_v1.log", "OK"),
-                ("repo4:v2", "repo4_v2.log", "OK"),
-                ("repo4:latest", "repo4_latest.log", "OK"),
+                TestResult("repo1:v1", "OK", 1, [Path("repo1_v1.log")]),
+                TestResult("repo1:v2", "OK", 1, [Path("repo1_v2.log")]),
+                TestResult("repo1:latest", "OK", 1, [Path("repo1_latest.log")]),
+                TestResult("repo4:v1", "OK", 1, [Path("repo4_v1.log")]),
+                TestResult("repo4:v2", "OK", 1, [Path("repo4_v2.log")]),
+                TestResult("repo4:latest", "OK", 1, [Path("repo4_latest.log")]),
             ],
             [],  # repo1 is built
             [  # repo3 -> repo2 -> repo1
-                ("repo2:v1", "repo2_v1.log", "OK"),
-                ("repo2:v2", "repo2_v2.log", "OK"),
-                ("repo2:latest", "repo2_latest.log", "OK"),
-                ("repo3:v1", "repo3_v1.log", "OK"),
-                ("repo3:v2", "repo3_v2.log", "OK"),
-                ("repo3:latest", "repo3_latest.log", "OK"),
+                TestResult("repo2:v1", "OK", 1, [Path("repo2_v1.log")]),
+                TestResult("repo2:v2", "OK", 1, [Path("repo2_v2.log")]),
+                TestResult("repo2:latest", "OK", 1, [Path("repo2_latest.log")]),
+                TestResult("repo3:v1", "OK", 1, [Path("repo3_v1.log")]),
+                TestResult("repo3:v2", "OK", 1, [Path("repo3_v2.log")]),
+                TestResult("repo3:latest", "OK", 1, [Path("repo3_latest.log")]),
             ],
             [],  # repo2 -> repo1 are built
            [],  # repo1 is built
         ]
-        self.assertEqual(results, expected)
+        self.assertEqual(test_results, expected)
 
 
 class TestDockerServer(unittest.TestCase):
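
The updated test can assertEqual whole TestResult objects because @dataclass generates a field-by-field __eq__; the only catch is the nondeterministic duration, which the test pins to 1 before comparing. In miniature:

from pathlib import Path
from report import TestResult

got = TestResult("repo1:v1", "OK", 0.731, [Path("repo1_v1.log")])
got.time = 1  # neutralize the random duration, as the test above does
assert got == TestResult("repo1:v1", "OK", 1, [Path("repo1_v1.log")])
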
@@ -4,24 +4,27 @@ import logging
 import subprocess
 import os
 import sys
 
 from github import Github
 
-from env_helper import TEMP_PATH, REPO_COPY
-from s3_helper import S3Helper
-from pr_info import PRInfo
-from get_robot_token import get_best_robot_token
-from upload_result_helper import upload_results
-from docker_pull_helper import get_image_with_version
-from commit_status_helper import post_commit_status, get_commit
 from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
-from stopwatch import Stopwatch
+from commit_status_helper import post_commit_status, get_commit
+from docker_pull_helper import get_image_with_version
+from env_helper import TEMP_PATH, REPO_COPY
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from report import TestResults, TestResult
+from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
+from tee_popen import TeePopen
 from upload_result_helper import upload_results
 
 
 NAME = "Docs Check"
 
-if __name__ == "__main__":
+
+def main():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description="Script to check the docs integrity",

@@ -98,7 +101,7 @@ if __name__ == "__main__":
 
     subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
     files = os.listdir(test_output)
-    lines = []
+    test_results = []  # type: TestResults
     additional_files = []
     if not files:
         logging.error("No output files after docs check")

@@ -111,27 +114,27 @@ if __name__ == "__main__":
         with open(path, "r", encoding="utf-8") as check_file:
             for line in check_file:
                 if "ERROR" in line:
-                    lines.append((line.split(":")[-1], "FAIL"))
-        if lines:
+                    test_results.append(TestResult(line.split(":")[-1], "FAIL"))
+        if test_results:
             status = "failure"
             description = "Found errors in docs"
         elif status != "failure":
-            lines.append(("No errors found", "OK"))
+            test_results.append(TestResult("No errors found", "OK"))
         else:
-            lines.append(("Non zero exit code", "FAIL"))
+            test_results.append(TestResult("Non zero exit code", "FAIL"))
 
     s3_helper = S3Helper()
     ch_helper = ClickHouseHelper()
 
     report_url = upload_results(
-        s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME
+        s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME
     )
     print("::notice ::Report url: {report_url}")
     post_commit_status(gh, pr_info.sha, NAME, description, status, report_url)
 
     prepared_events = prepare_tests_results_for_clickhouse(
         pr_info,
-        lines,
+        test_results,
         status,
         stopwatch.duration_seconds,
         stopwatch.start_time_str,

@@ -140,5 +143,9 @@ if __name__ == "__main__":
     )
 
     ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
-    if status == "error":
+    if status == "failure":
         sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -7,16 +7,17 @@ import sys
 
 from github import Github
 
-from env_helper import TEMP_PATH, REPO_COPY, CLOUDFLARE_TOKEN
-from s3_helper import S3Helper
-from pr_info import PRInfo
-from get_robot_token import get_best_robot_token
-from ssh import SSHKey
-from upload_result_helper import upload_results
-from docker_pull_helper import get_image_with_version
 from commit_status_helper import get_commit
+from docker_pull_helper import get_image_with_version
+from env_helper import TEMP_PATH, REPO_COPY, CLOUDFLARE_TOKEN
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from report import TestResults, TestResult
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from ssh import SSHKey
 from tee_popen import TeePopen
+from upload_result_helper import upload_results
 
 NAME = "Docs Release"
 

@@ -32,7 +33,7 @@ def parse_args() -> argparse.Namespace:
     return parser.parse_args()
 
 
-if __name__ == "__main__":
+def main():
     logging.basicConfig(level=logging.INFO)
     args = parse_args()
 

@@ -84,7 +85,7 @@ if __name__ == "__main__":
 
     subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)
     files = os.listdir(test_output)
-    lines = []
+    test_results = []  # type: TestResults
     additional_files = []
     if not files:
         logging.error("No output files after docs release")

@@ -97,19 +98,19 @@ if __name__ == "__main__":
         with open(path, "r", encoding="utf-8") as check_file:
             for line in check_file:
                 if "ERROR" in line:
-                    lines.append((line.split(":")[-1], "FAIL"))
-        if lines:
+                    test_results.append(TestResult(line.split(":")[-1], "FAIL"))
+        if test_results:
             status = "failure"
             description = "Found errors in docs"
         elif status != "failure":
-            lines.append(("No errors found", "OK"))
+            test_results.append(TestResult("No errors found", "OK"))
         else:
-            lines.append(("Non zero exit code", "FAIL"))
+            test_results.append(TestResult("Non zero exit code", "FAIL"))
 
     s3_helper = S3Helper()
 
     report_url = upload_results(
-        s3_helper, pr_info.number, pr_info.sha, lines, additional_files, NAME
+        s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME
     )
     print("::notice ::Report url: {report_url}")
     commit = get_commit(gh, pr_info.sha)

@@ -119,3 +120,7 @@ if __name__ == "__main__":
 
     if status == "failure":
         sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -6,29 +6,31 @@ import os
 import csv
 import sys
 import atexit
+from pathlib import Path
 from typing import List, Tuple
 
 from github import Github
 
-from env_helper import CACHES_PATH, TEMP_PATH
-from pr_info import FORCE_TESTS_LABEL, PRInfo
-from s3_helper import S3Helper
-from get_robot_token import get_best_robot_token
-from upload_result_helper import upload_results
-from docker_pull_helper import get_image_with_version
-from commit_status_helper import (
-    post_commit_status,
-    update_mergeable_check,
-)
-from ccache_utils import get_ccache_if_not_exists, upload_ccache
 from clickhouse_helper import (
     ClickHouseHelper,
     mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
-from stopwatch import Stopwatch
+from commit_status_helper import (
+    post_commit_status,
+    update_mergeable_check,
+)
+from docker_pull_helper import get_image_with_version
+from env_helper import CACHES_PATH, TEMP_PATH
+from get_robot_token import get_best_robot_token
+from pr_info import FORCE_TESTS_LABEL, PRInfo
+from report import TestResults, read_test_results
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
 from tee_popen import TeePopen
+from ccache_utils import get_ccache_if_not_exists, upload_ccache
+from upload_result_helper import upload_results
 
 NAME = "Fast test"
 

@@ -53,8 +55,8 @@ def get_fasttest_cmd(
 
 def process_results(
     result_folder: str,
-) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
-    test_results = []  # type: List[Tuple[str, str]]
+) -> Tuple[str, str, TestResults, List[str]]:
+    test_results = []  # type: TestResults
     additional_files = []
     # Just upload all files from result_folder.
     # If task provides processed results, then it's responsible for content of

@@ -78,17 +80,15 @@ def process_results(
         return "error", "Invalid check_status.tsv", test_results, additional_files
     state, description = status[0][0], status[0][1]
 
-    results_path = os.path.join(result_folder, "test_results.tsv")
-    if os.path.exists(results_path):
-        with open(results_path, "r", encoding="utf-8") as results_file:
-            test_results = list(csv.reader(results_file, delimiter="\t"))  # type: ignore
+    results_path = Path(result_folder) / "test_results.tsv"
+    test_results = read_test_results(results_path)
     if len(test_results) == 0:
         return "error", "Empty test_results.tsv", test_results, additional_files
 
     return state, description, test_results, additional_files
 
 
-if __name__ == "__main__":
+def main():
     logging.basicConfig(level=logging.INFO)
 
     stopwatch = Stopwatch()
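
read_test_results (defined in report.py at the end of this diff) replaces the csv.reader boilerplate each check previously duplicated. Judging from its implementation, the TSV it parses has two mandatory and two optional columns:

# test_results.tsv layout: name <TAB> status [<TAB> seconds [<TAB> logs]]
# The 4th column is either raw log text or a pythonic list of file paths,
# which is why read_test_results takes a with_raw_logs flag.
example = "00001_select_1\tOK\t0.052\n00002_bad_query\tFAIL\t1.3\n"
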
@@ -175,7 +175,6 @@ if __name__ == "__main__":
         "test_log.txt" in test_output_files or "test_result.txt" in test_output_files
     )
     test_result_exists = "test_results.tsv" in test_output_files
-    test_results = []  # type: List[Tuple[str, str]]
     if "submodule_log.txt" not in test_output_files:
         description = "Cannot clone repository"
         state = "failure"

@@ -210,7 +209,6 @@ if __name__ == "__main__":
         test_results,
         [run_log_path] + additional_logs,
         NAME,
-        True,
     )
     print(f"::notice ::Report url: {report_url}")
     post_commit_status(gh, pr_info.sha, NAME, description, state, report_url)

@@ -232,3 +230,7 @@ if __name__ == "__main__":
             print(f"'{FORCE_TESTS_LABEL}' enabled, will report success")
         else:
             sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -7,18 +7,17 @@ import os
 import subprocess
 import sys
 import atexit
+from pathlib import Path
 from typing import List, Tuple
 
 from github import Github
 
-from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
-from s3_helper import S3Helper
-from get_robot_token import get_best_robot_token
-from pr_info import FORCE_TESTS_LABEL, PRInfo
 from build_download_helper import download_all_deb_packages
-from download_release_packages import download_last_release
-from upload_result_helper import upload_results
-from docker_pull_helper import get_image_with_version
+from clickhouse_helper import (
+    ClickHouseHelper,
+    mark_flaky_tests,
+    prepare_tests_results_for_clickhouse,
+)
 from commit_status_helper import (
     post_commit_status,
     get_commit,

@@ -26,14 +25,17 @@ from commit_status_helper import (
     post_commit_status_to_file,
     update_mergeable_check,
 )
-from clickhouse_helper import (
-    ClickHouseHelper,
-    mark_flaky_tests,
-    prepare_tests_results_for_clickhouse,
-)
-from stopwatch import Stopwatch
+from docker_pull_helper import get_image_with_version
+from download_release_packages import download_last_release
+from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
+from get_robot_token import get_best_robot_token
+from pr_info import FORCE_TESTS_LABEL, PRInfo
+from report import TestResults, read_test_results
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
 from tee_popen import TeePopen
+from upload_result_helper import upload_results
 
 NO_CHANGES_MSG = "Nothing to run"
 

@@ -126,8 +128,8 @@ def get_tests_to_run(pr_info):
 def process_results(
     result_folder: str,
     server_log_path: str,
-) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
-    test_results = []  # type: List[Tuple[str, str]]
+) -> Tuple[str, str, TestResults, List[str]]:
+    test_results = []  # type: TestResults
     additional_files = []
     # Just upload all files from result_folder.
     # If task provides processed results, then it's responsible for content of result_folder.

@@ -161,16 +163,15 @@ def process_results(
         return "error", "Invalid check_status.tsv", test_results, additional_files
     state, description = status[0][0], status[0][1]
 
-    results_path = os.path.join(result_folder, "test_results.tsv")
+    results_path = Path(result_folder) / "test_results.tsv"
 
-    if os.path.exists(results_path):
+    if results_path.exists():
         logging.info("Found test_results.tsv")
     else:
         logging.info("Files in result folder %s", os.listdir(result_folder))
         return "error", "Not found test_results.tsv", test_results, additional_files
 
-    with open(results_path, "r", encoding="utf-8") as results_file:
-        test_results = list(csv.reader(results_file, delimiter="\t"))  # type: ignore
+    test_results = read_test_results(results_path)
     if len(test_results) == 0:
         return "error", "Empty test_results.tsv", test_results, additional_files
 

@@ -195,7 +196,7 @@ def parse_args():
     return parser.parse_args()
 
 
-if __name__ == "__main__":
+def main():
     logging.basicConfig(level=logging.INFO)
 
     stopwatch = Stopwatch()

@@ -377,3 +378,7 @@ if __name__ == "__main__":
             print(f"'{FORCE_TESTS_LABEL}' enabled, will report success")
         else:
             sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -7,31 +7,33 @@ import logging
 import os
 import subprocess
 import sys
+from pathlib import Path
 from typing import List, Tuple
 
 from github import Github
 
-from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
-from s3_helper import S3Helper
-from get_robot_token import get_best_robot_token
-from pr_info import PRInfo
 from build_download_helper import download_all_deb_packages
-from download_release_packages import download_last_release
-from upload_result_helper import upload_results
-from docker_pull_helper import get_images_with_versions
-from commit_status_helper import (
-    post_commit_status,
-    override_status,
-    post_commit_status_to_file,
-)
 from clickhouse_helper import (
     ClickHouseHelper,
     mark_flaky_tests,
     prepare_tests_results_for_clickhouse,
 )
-from stopwatch import Stopwatch
+from commit_status_helper import (
+    post_commit_status,
+    override_status,
+    post_commit_status_to_file,
+)
+from docker_pull_helper import get_images_with_versions
+from download_release_packages import download_last_release
+from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
+from get_robot_token import get_best_robot_token
+from pr_info import PRInfo
+from report import TestResults, read_test_results
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from stopwatch import Stopwatch
 from tee_popen import TeePopen
+from upload_result_helper import upload_results
 
 
 # When update, update

@@ -90,8 +92,8 @@ def get_env_for_runner(build_path, repo_path, result_path, work_path):
 
 def process_results(
     result_folder: str,
-) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
-    test_results = []  # type: List[Tuple[str, str]]
+) -> Tuple[str, str, TestResults, List[str]]:
+    test_results = []  # type: TestResults
     additional_files = []
     # Just upload all files from result_folder.
     # If task provides processed results, then it's responsible for content of result_folder.

@@ -115,10 +117,8 @@ def process_results(
         return "error", "Invalid check_status.tsv", test_results, additional_files
     state, description = status[0][0], status[0][1]
 
-    results_path = os.path.join(result_folder, "test_results.tsv")
-    if os.path.exists(results_path):
-        with open(results_path, "r", encoding="utf-8") as results_file:
-            test_results = list(csv.reader(results_file, delimiter="\t"))  # type: ignore
+    results_path = Path(result_folder) / "test_results.tsv"
+    test_results = read_test_results(results_path, False)
     if len(test_results) == 0:
         return "error", "Empty test_results.tsv", test_results, additional_files
 

@@ -142,7 +142,7 @@ def parse_args():
     return parser.parse_args()
 
 
-if __name__ == "__main__":
+def main():
     logging.basicConfig(level=logging.INFO)
 
     stopwatch = Stopwatch()

@@ -271,7 +271,6 @@ if __name__ == "__main__":
         test_results,
         [output_path_log] + additional_logs,
         check_name_with_group,
-        False,
     )
 
     print(f"::notice:: {check_name} Report url: {report_url}")

@@ -303,5 +302,9 @@ if __name__ == "__main__":
 
     ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)
 
-    if state == "error":
+    if state == "failure":
         sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
@@ -11,20 +11,21 @@ import boto3  # type: ignore
 import requests  # type: ignore
 from github import Github
 
+from build_download_helper import get_build_name_for_check
+from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
+from commit_status_helper import post_commit_status
+from compress_files import compress_fast
 from env_helper import REPO_COPY, TEMP_PATH, S3_BUILDS_BUCKET, S3_DOWNLOAD
-from stopwatch import Stopwatch
-from upload_result_helper import upload_results
-from s3_helper import S3Helper
 from get_robot_token import get_best_robot_token, get_parameter_from_ssm
 from pr_info import PRInfo
-from compress_files import compress_fast
-from commit_status_helper import post_commit_status
-from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
-from version_helper import get_version_from_repo
-from tee_popen import TeePopen
-from ssh import SSHKey
-from build_download_helper import get_build_name_for_check
+from report import TestResults, TestResult
 from rerun_helper import RerunHelper
+from s3_helper import S3Helper
+from ssh import SSHKey
+from stopwatch import Stopwatch
+from tee_popen import TeePopen
+from upload_result_helper import upload_results
+from version_helper import get_version_from_repo
 
 JEPSEN_GROUP_NAME = "jepsen_group"
 

@@ -44,8 +45,8 @@ CRASHED_TESTS_ANCHOR = "# Crashed tests"
 FAILED_TESTS_ANCHOR = "# Failed tests"
 
 
-def _parse_jepsen_output(path):
-    test_results = []
+def _parse_jepsen_output(path: str) -> TestResults:
+    test_results = []  # type: TestResults
     current_type = ""
     with open(path, "r") as f:
         for line in f:

@@ -59,7 +60,7 @@ def _parse_jepsen_output(path):
             if (
                 line.startswith("store/clickhouse") or line.startswith("clickhouse")
             ) and current_type:
-                test_results.append((line.strip(), current_type))
+                test_results.append(TestResult(line.strip(), current_type))
 
     return test_results
 

@@ -266,7 +267,7 @@ if __name__ == "__main__":
     additional_data = []
     try:
         test_result = _parse_jepsen_output(jepsen_log_path)
-        if any(r[1] == "FAIL" for r in test_result):
+        if any(r.status == "FAIL" for r in test_result):
             status = "failure"
             description = "Found invalid analysis (ノಥ益ಥ)ノ ┻━┻"
 

@@ -279,7 +280,7 @@ if __name__ == "__main__":
         print("Exception", ex)
         status = "failure"
         description = "No Jepsen output log"
-        test_result = [("No Jepsen output log", "FAIL")]
+        test_result = [TestResult("No Jepsen output log", "FAIL")]
 
     s3_helper = S3Helper()
     report_url = upload_results(
@@ -1,4 +1,9 @@
 # -*- coding: utf-8 -*-
+from ast import literal_eval
+from dataclasses import dataclass
+from pathlib import Path
+from typing import List, Optional, Tuple
+import csv
 import os
 import datetime
 

@@ -167,6 +172,78 @@ HTML_TEST_PART = """
 BASE_HEADERS = ["Test name", "Test status"]
 
 
+@dataclass
+class TestResult:
+    name: str
+    status: str
+    # the following fields are optional
+    time: Optional[float] = None
+    log_files: Optional[List[Path]] = None
+    raw_logs: Optional[str] = None
+    # the field for uploaded logs URLs
+    log_urls: Optional[List[str]] = None
+
+    def set_raw_logs(self, raw_logs: str) -> None:
+        self.raw_logs = raw_logs
+
+    def set_log_files(self, log_files_literal: str) -> None:
+        self.log_files = []  # type: Optional[List[Path]]
+        log_paths = literal_eval(log_files_literal)
+        if not isinstance(log_paths, list):
+            raise ValueError(
+                f"Malformed input: must be a list literal: {log_files_literal}"
+            )
+        for log_path in log_paths:
+            file = Path(log_path)
+            assert file.exists()
+            self.log_files.append(file)
+
+
+TestResults = List[TestResult]
+
+
+def read_test_results(results_path: Path, with_raw_logs: bool = True) -> TestResults:
+    results = []  # type: TestResults
+    with open(results_path, "r", encoding="utf-8") as descriptor:
+        reader = csv.reader(descriptor, delimiter="\t")
+        for line in reader:
+            name = line[0]
+            status = line[1]
+            time = None
+            if len(line) >= 3 and line[2]:
+                # The value can be emtpy, but when it's not,
+                # it's the time spent on the test
+                try:
+                    time = float(line[2])
+                except ValueError:
+                    pass
+
+            result = TestResult(name, status, time)
+            if len(line) == 4 and line[3]:
+                # The value can be emtpy, but when it's not,
+                # the 4th value is a pythonic list, e.g. ['file1', 'file2']
+                if with_raw_logs:
+                    result.set_raw_logs(line[3])
+                else:
+                    result.set_log_files(line[3])
+
+            results.append(result)
+
+    return results
+
+
+@dataclass
+class BuildResult:
+    compiler: str
+    build_type: str
+    sanitizer: str
+    status: str
+    elapsed_seconds: int
+
+
+BuildResults = List[BuildResult]
+
+
 class ReportColorTheme:
     class ReportColor:
         yellow = "#FFB400"
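
This is the heart of the PR: the dataclass, the TestResults alias, and the shared TSV reader that every script above now imports. A small end-to-end sketch, assuming a well-formed results file:

from pathlib import Path
from report import TestResult, read_test_results

tsv = Path("test_results.tsv")
tsv.write_text("00001_select_1\tOK\t0.052\n00002_bad_query\tFAIL\n", encoding="utf-8")

results = read_test_results(tsv)  # with_raw_logs=True by default
assert results[0] == TestResult("00001_select_1", "OK", 0.052)
assert results[1].time is None  # a missing 3rd column stays None, not 0
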
@ -178,6 +255,9 @@ class ReportColorTheme:
|
||||
bugfixcheck = (ReportColor.yellow, ReportColor.blue, ReportColor.blue)
|
||||
|
||||
|
||||
ColorTheme = Tuple[str, str, str]
|
||||
|
||||
|
||||
def _format_header(header, branch_name, branch_url=None):
|
||||
result = " ".join([w.capitalize() for w in header.split(" ")])
|
||||
result = result.replace("Clickhouse", "ClickHouse")
|
||||
@ -192,7 +272,7 @@ def _format_header(header, branch_name, branch_url=None):
|
||||
return result
|
||||
|
||||
|
||||
def _get_status_style(status, colortheme=None):
|
||||
def _get_status_style(status: str, colortheme: Optional[ColorTheme] = None) -> str:
|
||||
ok_statuses = ("OK", "success", "PASSED")
|
||||
fail_statuses = ("FAIL", "failure", "error", "FAILED", "Timeout")
|
||||
|
||||
@ -230,80 +310,79 @@ def _get_html_url(url):


def create_test_html_report(
    header,
    test_result,
    raw_log_url,
    task_url,
    job_url,
    branch_url,
    branch_name,
    commit_url,
    additional_urls=None,
    with_raw_logs=False,
    statuscolors=None,
):
    header: str,
    test_results: TestResults,
    raw_log_url: str,
    task_url: str,
    job_url: str,
    branch_url: str,
    branch_name: str,
    commit_url: str,
    additional_urls: Optional[List[str]] = None,
    statuscolors: Optional[ColorTheme] = None,
) -> str:
    if additional_urls is None:
        additional_urls = []

    if test_result:
    if test_results:
        rows_part = ""
        num_fails = 0
        has_test_time = False
        has_test_logs = False
        has_log_urls = False

        if with_raw_logs:
            # Display entries with logs at the top (they correspond to failed tests)
            test_result.sort(key=lambda result: len(result) <= 3)
        # Display entries with logs at the top (they correspond to failed tests)
        test_results.sort(
            key=lambda result: result.raw_logs is None and result.log_files is None
        )

        for result in test_result:
            test_name = result[0]
            test_status = result[1]

            test_logs = None
            test_time = None
            if len(result) > 2:
                test_time = result[2]
                has_test_time = True

            if len(result) > 3:
                test_logs = result[3]
                has_test_logs = True
        for test_result in test_results:
            colspan = 0
            if test_result.log_files is not None:
                has_log_urls = True

            row = "<tr>"
            is_fail = test_status in ("FAIL", "FLAKY")
            if is_fail and with_raw_logs and test_logs is not None:
            is_fail = test_result.status in ("FAIL", "FLAKY")
            if is_fail and test_result.raw_logs is not None:
                row = '<tr class="failed">'
            row += "<td>" + test_name + "</td>"
            style = _get_status_style(test_status, colortheme=statuscolors)
            row += "<td>" + test_result.name + "</td>"
            colspan += 1
            style = _get_status_style(test_result.status, colortheme=statuscolors)

            # Allow quick scrolling to the first failure.
            is_fail_id = ""
            fail_id = ""
            if is_fail:
                num_fails = num_fails + 1
                is_fail_id = 'id="fail' + str(num_fails) + '" '
                fail_id = f'id="fail{num_fails}" '

            row += f'<td {is_fail_id}style="{style}">{test_status}</td>'
            row += f'<td {fail_id}style="{style}">{test_result.status}</td>'
            colspan += 1

            if test_time is not None:
                row += "<td>" + test_time + "</td>"
            if test_result.time is not None:
                has_test_time = True
                row += f"<td>{test_result.time}</td>"
                colspan += 1

            if test_logs is not None and not with_raw_logs:
                test_logs_html = "<br>".join([_get_html_url(url) for url in test_logs])
            if test_result.log_urls is not None:
                test_logs_html = "<br>".join(
                    [_get_html_url(url) for url in test_result.log_urls]
                )
                row += "<td>" + test_logs_html + "</td>"
                colspan += 1

            row += "</tr>"
            rows_part += row
            if test_logs is not None and with_raw_logs:
                row = '<tr class="failed-content">'
                # TODO: compute colspan too
                row += '<td colspan="3"><pre>' + test_logs + "</pre></td>"
                row += "</tr>"
            if test_result.raw_logs is not None:
                row = (
                    '<tr class="failed-content">'
                    f'<td colspan="{colspan}"><pre>{test_result.raw_logs}</pre></td>'
                    "</tr>"
                )
                rows_part += row

        headers = BASE_HEADERS
        headers = BASE_HEADERS.copy()
        if has_test_time:
            headers.append("Test time, sec.")
        if has_test_logs and not with_raw_logs:
        if has_log_urls:
            headers.append("Logs")

        headers_html = "".join(["<th>" + h + "</th>" for h in headers])
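
The new sort key is worth a second look: it maps each result to a boolean, and because False orders before True and list.sort is stable, entries carrying raw_logs or log_files (the failures) float to the top while the rest keep their original order. A standalone illustration with invented names, assuming the report module above is importable:

# Illustration of the sort key used above; test names are invented.
from report import TestResult

results = [
    TestResult("test_a", "OK"),
    TestResult("test_b", "FAIL", raw_logs="assertion failed"),
    TestResult("test_c", "OK"),
]
results.sort(
    key=lambda result: result.raw_logs is None and result.log_files is None
)
print([r.name for r in results])  # ['test_b', 'test_a', 'test_c']
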
@ -319,7 +398,7 @@ def create_test_html_report(
    if "?" in raw_log_name:
        raw_log_name = raw_log_name.split("?")[0]

    result = HTML_BASE_TEST_TEMPLATE.format(
    html = HTML_BASE_TEST_TEMPLATE.format(
        title=_format_header(header, branch_name),
        header=_format_header(header, branch_name, branch_url),
        raw_log_name=raw_log_name,
@ -331,7 +410,7 @@
        commit_url=commit_url,
        additional_urls=additional_html_urls,
    )
    return result
    return html


HTML_BASE_BUILD_TEMPLATE = """
@ -379,15 +458,15 @@ LINK_TEMPLATE = '<a href="{url}">{text}</a>'


def create_build_html_report(
    header,
    build_results,
    build_logs_urls,
    artifact_urls_list,
    task_url,
    branch_url,
    branch_name,
    commit_url,
):
    header: str,
    build_results: BuildResults,
    build_logs_urls: List[str],
    artifact_urls_list: List[List[str]],
    task_url: str,
    branch_url: str,
    branch_name: str,
    commit_url: str,
) -> str:
    rows = ""
    for (build_result, build_log_url, artifact_urls) in zip(
        build_results, build_logs_urls, artifact_urls_list

@ -4,27 +4,27 @@ import logging
import subprocess
import os
import sys
from typing import List, Tuple
from typing import List

from github import Github

from build_download_helper import get_build_name_for_check, read_build_urls
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from commit_status_helper import post_commit_status
from docker_pull_helper import get_image_with_version
from env_helper import (
    GITHUB_REPOSITORY,
    GITHUB_RUN_URL,
    REPORTS_PATH,
    REPO_COPY,
    TEMP_PATH,
)
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from build_download_helper import get_build_name_for_check, read_build_urls
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from upload_result_helper import upload_results
from stopwatch import Stopwatch
from report import TestResults, TestResult
from rerun_helper import RerunHelper
from s3_helper import S3Helper
from stopwatch import Stopwatch
from upload_result_helper import upload_results

IMAGE_NAME = "clickhouse/sqlancer-test"

@ -52,13 +52,12 @@ def get_commit(gh, commit_sha):
    return commit


if __name__ == "__main__":
def main():
    logging.basicConfig(level=logging.INFO)

    stopwatch = Stopwatch()

    temp_path = TEMP_PATH
    repo_path = REPO_COPY
    reports_path = REPORTS_PATH

    check_name = sys.argv[1]
@ -127,11 +126,6 @@ if __name__ == "__main__":
    subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)

    check_name_lower = (
        check_name.lower().replace("(", "").replace(")", "").replace(" ", "")
    )
    s3_prefix = f"{pr_info.number}/{pr_info.sha}/{check_name_lower}/"

    tests = [
        "TLPGroupBy",
        "TLPHaving",
@ -158,7 +152,7 @@ if __name__ == "__main__":
    report_url = GITHUB_RUN_URL

    status = "success"
    test_results = []  # type: List[Tuple[str, str]]
    test_results = []  # type: TestResults
    # Try to get status message saved by the SQLancer
    try:
        # with open(
@ -166,13 +160,13 @@ if __name__ == "__main__":
        # ) as status_f:
        #     status = status_f.readline().rstrip("\n")
        if os.path.exists(os.path.join(workspace_path, "server_crashed.log")):
            test_results.append(("Server crashed", "FAIL"))
            test_results.append(TestResult("Server crashed", "FAIL"))
        with open(
            os.path.join(workspace_path, "summary.tsv"), "r", encoding="utf-8"
        ) as summary_f:
            for line in summary_f:
                l = line.rstrip("\n").split("\t")
                test_results.append((l[0], l[1]))
                test_results.append(TestResult(l[0], l[1]))

        with open(
            os.path.join(workspace_path, "description.txt"), "r", encoding="utf-8"
@ -189,7 +183,6 @@ if __name__ == "__main__":
        test_results,
        paths,
        check_name,
        False,
    )

    post_commit_status(gh, pr_info.sha, check_name, description, status, report_url)
@ -212,3 +205,7 @@ if __name__ == "__main__":
    print(f"::notice Result: '{status}', '{description}', '{report_url}'")
    post_commit_status(gh, pr_info.sha, check_name, description, status, report_url)


if __name__ == "__main__":
    main()

@ -9,9 +9,9 @@ class Stopwatch:
        self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S")

    @property
    def duration_seconds(self):
    def duration_seconds(self) -> float:
        return (datetime.datetime.utcnow() - self.start_time).total_seconds()

    @property
    def start_time_str(self):
    def start_time_str(self) -> str:
        return self.start_time_str_value
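
A quick usage sketch for the now-annotated properties, mirroring how the check scripts above construct the class (the sleep is arbitrary):

# Sketch only: Stopwatch usage as in the check scripts.
from time import sleep

from stopwatch import Stopwatch

stopwatch = Stopwatch()
sleep(0.1)
print(f"{stopwatch.duration_seconds:.2f}s elapsed since {stopwatch.start_time_str}")
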
@ -5,26 +5,28 @@ import logging
import subprocess
import os
import sys
from pathlib import Path
from typing import List, Tuple

from github import Github

from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from build_download_helper import download_all_deb_packages
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import (
    ClickHouseHelper,
    mark_flaky_tests,
    prepare_tests_results_for_clickhouse,
)
from stopwatch import Stopwatch
from commit_status_helper import post_commit_status
from docker_pull_helper import get_image_with_version
from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults, read_test_results
from rerun_helper import RerunHelper
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
from upload_result_helper import upload_results


def get_run_command(
@ -48,8 +50,8 @@ def get_run_command(

def process_results(
    result_folder: str, server_log_path: str, run_log_path: str
) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
    test_results = []  # type: List[Tuple[str, str]]
) -> Tuple[str, str, TestResults, List[str]]:
    test_results = []  # type: TestResults
    additional_files = []
    # Just upload all files from result_folder.
    # If task provides processed results, then it's responsible for content
@ -91,16 +93,15 @@ def process_results(
        return "error", "Invalid check_status.tsv", test_results, additional_files
    state, description = status[0][0], status[0][1]

    results_path = os.path.join(result_folder, "test_results.tsv")
    with open(results_path, "r", encoding="utf-8") as results_file:
        test_results = list(csv.reader(results_file, delimiter="\t"))  # type: ignore
    results_path = Path(result_folder) / "test_results.tsv"
    test_results = read_test_results(results_path, False)
    if len(test_results) == 0:
        raise Exception("Empty results")

    return state, description, test_results, additional_files


if __name__ == "__main__":
def main():
    logging.basicConfig(level=logging.INFO)

    stopwatch = Stopwatch()
@ -185,5 +186,9 @@ if __name__ == "__main__":
    )
    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

    if state == "error":
    if state == "failure":
        sys.exit(1)


if __name__ == "__main__":
    main()

@ -6,7 +6,7 @@ import logging
import os
import subprocess
import sys

from pathlib import Path
from typing import List, Tuple

@ -22,6 +22,7 @@ from get_robot_token import get_best_robot_token
from github_helper import GitHub
from git_helper import git_runner
from pr_info import PRInfo
from report import TestResults, read_test_results
from rerun_helper import RerunHelper
from s3_helper import S3Helper
from ssh import SSHKey
@ -40,8 +41,8 @@ GIT_PREFIX = (  # All commits to remote are done as robot-clickhouse

def process_result(
    result_folder: str,
) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
    test_results = []  # type: List[Tuple[str, str]]
) -> Tuple[str, str, TestResults, List[str]]:
    test_results = []  # type: TestResults
    additional_files = []
    # Just upload all files from result_folder.
    # If task provides processed results, then it's responsible
@ -57,7 +58,7 @@ def process_result(
    status = []
    status_path = os.path.join(result_folder, "check_status.tsv")
    if os.path.exists(status_path):
        logging.info("Found test_results.tsv")
        logging.info("Found check_status.tsv")
        with open(status_path, "r", encoding="utf-8") as status_file:
            status = list(csv.reader(status_file, delimiter="\t"))
    if len(status) != 1 or len(status[0]) != 2:
@ -66,9 +67,8 @@ def process_result(
    state, description = status[0][0], status[0][1]

    try:
        results_path = os.path.join(result_folder, "test_results.tsv")
        with open(results_path, "r", encoding="utf-8") as fd:
            test_results = list(csv.reader(fd, delimiter="\t"))  # type: ignore
        results_path = Path(result_folder) / "test_results.tsv"
        test_results = read_test_results(results_path)
        if len(test_results) == 0:
            raise Exception("Empty results")

@ -134,7 +134,7 @@ def commit_push_staged(pr_info: PRInfo) -> None:
    git_runner(push_cmd)


if __name__ == "__main__":
def main():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("git_helper").setLevel(logging.DEBUG)
    args = parse_args()
@ -205,3 +205,7 @@ if __name__ == "__main__":

    if state in ["error", "failure"]:
        sys.exit(1)


if __name__ == "__main__":
    main()

@ -1,10 +1,11 @@
#!/usr/bin/env python3

from io import TextIOWrapper
from pathlib import Path
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from time import sleep
from typing import Optional
from typing import Optional, Union
import logging
import os
import sys
@ -18,7 +19,7 @@ class TeePopen:
    def __init__(
        self,
        command: str,
        log_file: str,
        log_file: Union[str, Path],
        env: Optional[dict] = None,
        timeout: Optional[int] = None,
    ):
@ -63,7 +64,7 @@ class TeePopen:
        self.wait()
        self.log_file.close()

    def wait(self):
    def wait(self) -> int:
        if self.process.stdout is not None:
            for line in self.process.stdout:
                sys.stdout.write(line)
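
With log_file widened to Union[str, Path], callers can hand over a Path directly. A hedged sketch of the usual usage; the command and log location are arbitrary, and the context-manager protocol is assumed from how the check scripts drive TeePopen rather than shown in this hunk:

# Sketch only: command, path, and with-block usage are assumptions.
from pathlib import Path

from tee_popen import TeePopen

log_path = Path("/tmp/example_run.log")  # a Path now works as well as a str
with TeePopen("echo hello", log_path) as process:
    retcode = process.wait()  # wait() is now annotated to return int
print("exit code:", retcode)
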
@ -9,22 +9,23 @@ from typing import List, Tuple

from github import Github

from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH
from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from build_download_helper import download_unit_tests
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status, update_mergeable_check
from clickhouse_helper import (
    ClickHouseHelper,
    mark_flaky_tests,
    prepare_tests_results_for_clickhouse,
)
from stopwatch import Stopwatch
from commit_status_helper import post_commit_status, update_mergeable_check
from docker_pull_helper import get_image_with_version
from env_helper import TEMP_PATH, REPORTS_PATH
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from report import TestResults, TestResult
from rerun_helper import RerunHelper
from s3_helper import S3Helper
from stopwatch import Stopwatch
from tee_popen import TeePopen
from upload_result_helper import upload_results


IMAGE_NAME = "clickhouse/unit-test"
@ -40,20 +41,20 @@ def get_test_name(line):

def process_results(
    result_folder: str,
) -> Tuple[str, str, List[Tuple[str, str]], List[str]]:
) -> Tuple[str, str, TestResults, List[str]]:
    OK_SIGN = "OK ]"
    FAILED_SIGN = "FAILED ]"
    SEGFAULT = "Segmentation fault"
    SIGNAL = "received signal SIG"
    PASSED = "PASSED"

    summary = []  # type: List[Tuple[str, str]]
    test_results = []  # type: TestResults
    total_counter = 0
    failed_counter = 0
    result_log_path = f"{result_folder}/test_result.txt"
    if not os.path.exists(result_log_path):
        logging.info("No output log on path %s", result_log_path)
        return "error", "No output log", summary, []
        return "error", "No output log", test_results, []

    status = "success"
    description = ""
@ -64,13 +65,13 @@ def process_results(
            logging.info("Found ok line: '%s'", line)
            test_name = get_test_name(line.strip())
            logging.info("Test name: '%s'", test_name)
            summary.append((test_name, "OK"))
            test_results.append(TestResult(test_name, "OK"))
            total_counter += 1
        elif FAILED_SIGN in line and "listed below" not in line and "ms)" in line:
            logging.info("Found fail line: '%s'", line)
            test_name = get_test_name(line.strip())
            logging.info("Test name: '%s'", test_name)
            summary.append((test_name, "FAIL"))
            test_results.append(TestResult(test_name, "FAIL"))
            total_counter += 1
            failed_counter += 1
        elif SEGFAULT in line:
@ -99,16 +100,15 @@ def process_results(
            f"fail: {failed_counter}, passed: {total_counter - failed_counter}"
        )

    return status, description, summary, [result_log_path]
    return status, description, test_results, [result_log_path]


if __name__ == "__main__":
def main():
    logging.basicConfig(level=logging.INFO)

    stopwatch = Stopwatch()

    temp_path = TEMP_PATH
    repo_path = REPO_COPY
    reports_path = REPORTS_PATH

    check_name = sys.argv[1]
@ -182,5 +182,9 @@ if __name__ == "__main__":

    ch_helper.insert_events_into(db="default", table="checks", events=prepared_events)

    if state == "error":
    if state == "failure":
        sys.exit(1)


if __name__ == "__main__":
    main()
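
The renamed accumulator is filled by scanning the googletest log for the OK ]/FAILED ] markers above. A toy rendition of that classification; the log lines are invented and get_test_name is stubbed, since its body is not part of this diff:

# Toy sketch: invented log lines, stubbed get_test_name.
from report import TestResult

OK_SIGN = "OK ]"
FAILED_SIGN = "FAILED ]"

def get_test_name(line):
    # stub: take the token after the closing bracket
    return line.split("]", 1)[1].strip().split(" ")[0]

lines = [
    "[       OK ] DateLUTTest.Simple (12 ms)",
    "[ FAILED ] HashTableTest.Collisions (3 ms)",
]
test_results = []
for line in lines:
    if OK_SIGN in line:
        test_results.append(TestResult(get_test_name(line.strip()), "OK"))
    elif FAILED_SIGN in line and "listed below" not in line and "ms)" in line:
        test_results.append(TestResult(get_test_name(line.strip()), "FAIL"))
print([(r.name, r.status) for r in test_results])
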
@ -1,6 +1,7 @@
from pathlib import Path
from typing import Dict, List
import os
import logging
import ast

from env_helper import (
    GITHUB_JOB_URL,
@ -8,34 +9,35 @@ from env_helper import (
    GITHUB_RUN_URL,
    GITHUB_SERVER_URL,
)
from report import ReportColorTheme, create_test_html_report
from report import ReportColorTheme, TestResults, create_test_html_report
from s3_helper import S3Helper


def process_logs(
    s3_client, additional_logs, s3_path_prefix, test_results, with_raw_logs
):
    s3_client: S3Helper,
    additional_logs: List[str],
    s3_path_prefix: str,
    test_results: TestResults,
) -> List[str]:
    logging.info("Upload files to s3 %s", additional_logs)

    processed_logs = {}  # type: ignore
    processed_logs = {}  # type: Dict[Path, str]
    # First, convert the log paths from test_results to S3 URLs.
    for test_result in test_results:
        if len(test_result) <= 3 or with_raw_logs:
        if test_result.log_files is None:
            continue

        # Convert from string repr of list to list.
        test_log_paths = ast.literal_eval(test_result[3])
        test_log_urls = []
        for log_path in test_log_paths:
            if log_path in processed_logs:
                test_log_urls.append(processed_logs[log_path])
            elif log_path:
        test_result.log_urls = []
        for path in test_result.log_files:
            if path in processed_logs:
                test_result.log_urls.append(processed_logs[path])
            elif path:
                url = s3_client.upload_test_report_to_s3(
                    log_path, s3_path_prefix + "/" + os.path.basename(log_path)
                    path.as_posix(), s3_path_prefix + "/" + path.name
                )
                test_log_urls.append(url)
                processed_logs[log_path] = url

        test_result[3] = test_log_urls
                test_result.log_urls.append(url)
                processed_logs[path] = url

    additional_urls = []
    for log_path in additional_logs:
@ -50,20 +52,18 @@ def process_logs(

def upload_results(
    s3_client,
    pr_number,
    commit_sha,
    test_results,
    additional_files,
    check_name,
    with_raw_logs=True,
    statuscolors=None,
):
    s3_client: S3Helper,
    pr_number: int,
    commit_sha: str,
    test_results: TestResults,
    additional_files: List[str],
    check_name: str,
) -> str:
    s3_path_prefix = f"{pr_number}/{commit_sha}/" + check_name.lower().replace(
        " ", "_"
    ).replace("(", "_").replace(")", "_").replace(",", "_")
    additional_urls = process_logs(
        s3_client, additional_files, s3_path_prefix, test_results, with_raw_logs
        s3_client, additional_files, s3_path_prefix, test_results
    )

    branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commits/master"
@ -74,8 +74,7 @@ def upload_results(
    commit_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commit/{commit_sha}"

    if additional_urls:
        raw_log_url = additional_urls[0]
        additional_urls.pop(0)
        raw_log_url = additional_urls.pop(0)
    else:
        raw_log_url = GITHUB_JOB_URL()
@ -93,7 +92,6 @@ def upload_results(
        branch_name,
        commit_url,
        additional_urls,
        with_raw_logs,
        statuscolors=statuscolors,
    )
    with open("report.html", "w", encoding="utf-8") as f:
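
Putting the pieces together: read_test_results(..., with_raw_logs=False) fills TestResult.log_files, process_logs above uploads each file once and records the URLs in TestResult.log_urls, and create_test_html_report renders them. A sketch of that middle step with a stand-in S3 client; the helper class, bucket URL, and file name are all invented, and only the cache-by-path logic mirrors the code above:

# Sketch only: FakeS3Helper and all names/URLs are invented.
from pathlib import Path

from report import TestResult

class FakeS3Helper:
    # stand-in for S3Helper.upload_test_report_to_s3
    def upload_test_report_to_s3(self, file_path, s3_path):
        return f"https://s3.example.com/{s3_path}"

s3_client = FakeS3Helper()
s3_path_prefix = "12345/deadbeef/unit_tests"
processed_logs = {}  # type: dict

result = TestResult("test_x", "FAIL", log_files=[Path("test_x.log")])
result.log_urls = []
for path in result.log_files:
    if path in processed_logs:
        result.log_urls.append(processed_logs[path])
    elif path:
        url = s3_client.upload_test_report_to_s3(
            path.as_posix(), s3_path_prefix + "/" + path.name
        )
        result.log_urls.append(url)
        processed_logs[path] = url

print(result.log_urls)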