There is no point in detecting flaky tests

This commit is contained in:
Alexey Milovidov 2023-07-15 23:00:23 +02:00
parent 9e7361a0f6
commit 20b77e946a
11 changed files with 11 additions and 69 deletions

View File

@@ -190,27 +190,3 @@ def prepare_tests_results_for_clickhouse(
result.append(current_row)
return result
def mark_flaky_tests(
    clickhouse_helper: ClickHouseHelper, check_name: str, test_results: TestResults
) -> None:
    """Downgrade FAIL results to FLAKY when the same test also failed on master recently.

    Queries the ``checks`` table for distinct test names of *check_name* that
    were FAIL or FLAKY on master (``pull_request_number = 0``) within the last
    3 days, then mutates *test_results* in place: any FAIL whose name appears
    in that set has its status rewritten to FLAKY.

    Best-effort by design: any exception (e.g. ClickHouse unreachable) is
    logged and swallowed so this lookup can never fail the CI job itself.
    """
    try:
        # NOTE(review): check_name is interpolated directly into the SQL
        # string — assumed to come from trusted CI configuration, not user
        # input; confirm against callers before reusing elsewhere.
        query = f"""SELECT DISTINCT test_name
FROM checks
WHERE
    check_start_time BETWEEN now() - INTERVAL 3 DAY AND now()
    AND check_name = '{check_name}'
    AND (test_status = 'FAIL' OR test_status = 'FLAKY')
    AND pull_request_number = 0
"""
        tests_data = clickhouse_helper.select_json_each_row("default", query)
        # Set of test names that failed on master — membership test below is O(1).
        master_failed_tests = {row["test_name"] for row in tests_data}
        logging.info("Found flaky tests: %s", ", ".join(master_failed_tests))

        for test_result in test_results:
            # Only FAILs already known to fail on master are re-labelled.
            if test_result.status == "FAIL" and test_result.name in master_failed_tests:
                test_result.status = "FLAKY"
    except Exception as ex:
        # Deliberate broad catch: flaky-test detection is advisory only.
        logging.error("Exception happened during flaky tests fetch %s", ex)

View File

@@ -13,7 +13,6 @@ from github import Github
from build_download_helper import download_builds_filter
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import RerunHelper, get_commit, post_commit_status
@@ -231,7 +230,6 @@ def main():
)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, args.check_name, test_results)
report_url = upload_results(
s3_helper,

View File

@@ -14,7 +14,6 @@ from github import Github
from build_check import get_release_or_pr
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -190,7 +189,6 @@ def main():
state, description, test_results, additional_logs = process_results(output_path)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, NAME, test_results)
s3_path_prefix = os.path.join(
get_release_or_pr(pr_info, get_version_from_repo())[0],
pr_info.sha,

View File

@@ -16,7 +16,6 @@ from github import Github
from build_download_helper import download_all_deb_packages
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -368,7 +367,6 @@ def main():
state = override_status(state, check_name, invert=validate_bugfix_check)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, check_name, test_results)
report_url = upload_results(
s3_helper,

View File

@@ -15,7 +15,6 @@ from github import Github
from build_download_helper import download_builds_filter
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -345,7 +344,6 @@ def main():
return
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, args.check_name, test_results)
description = format_description(description)

View File

@@ -15,7 +15,6 @@ from github import Github
from build_download_helper import download_all_deb_packages
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -276,7 +275,6 @@ def main():
state = override_status(state, check_name, invert=validate_bugfix_check)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, check_name, test_results)
s3_helper = S3Helper()
report_url = upload_results(

View File

@@ -349,7 +349,7 @@ def create_test_html_report(
has_log_urls = True
row = "<tr>"
has_error = test_result.status in ("FAIL", "FLAKY", "NOT_FAILED")
has_error = test_result.status in ("FAIL", "NOT_FAILED")
if has_error and test_result.raw_logs is not None:
row = '<tr class="failed">'
row += "<td>" + test_result.name + "</td>"

View File

@@ -13,7 +13,6 @@ from github import Github
from build_download_helper import download_all_deb_packages
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import RerunHelper, get_commit, post_commit_status
@@ -168,7 +167,6 @@ def run_stress_test(docker_image_name):
result_path, server_log_path, run_log_path
)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, check_name, test_results)
report_url = upload_results(
s3_helper,

View File

@@ -12,7 +12,6 @@ from typing import List, Tuple
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -189,7 +188,6 @@ def main():
state, description, test_results, additional_files = process_result(temp_path)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, NAME, test_results)
report_url = upload_results(
s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME

View File

@@ -12,7 +12,6 @@ from github import Github
from build_download_helper import download_unit_tests
from clickhouse_helper import (
ClickHouseHelper,
mark_flaky_tests,
prepare_tests_results_for_clickhouse,
)
from commit_status_helper import (
@@ -159,7 +158,6 @@ def main():
state, description, test_results, additional_logs = process_results(test_output)
ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, check_name, test_results)
report_url = upload_results(
s3_helper,

View File

@@ -487,24 +487,14 @@ class ClickhouseIntegrationTestsRunner:
def _update_counters(self, main_counters, current_counters, broken_tests):
for test in current_counters["PASSED"]:
if (
test not in main_counters["PASSED"]
and test not in main_counters["FLAKY"]
):
is_flaky = False
if test not in main_counters["PASSED"]:
if test in main_counters["FAILED"]:
main_counters["FAILED"].remove(test)
is_flaky = True
if test in main_counters["ERROR"]:
main_counters["ERROR"].remove(test)
is_flaky = True
if test in main_counters["BROKEN"]:
main_counters["BROKEN"].remove(test)
is_flaky = True
if is_flaky:
main_counters["FLAKY"].append(test)
else:
if test not in broken_tests:
main_counters["PASSED"].append(test)
else:
@@ -512,11 +502,8 @@ class ClickhouseIntegrationTestsRunner:
for state in ("ERROR", "FAILED"):
for test in current_counters[state]:
if test in main_counters["FLAKY"]:
continue
if test in main_counters["PASSED"]:
main_counters["PASSED"].remove(test)
main_counters["FLAKY"].append(test)
continue
if test not in broken_tests:
if test not in main_counters[state]:
@@ -605,7 +592,6 @@ class ClickhouseIntegrationTestsRunner:
"PASSED": [],
"FAILED": [],
"SKIPPED": [],
"FLAKY": [],
}
tests_times = defaultdict(float)
for test in tests_in_group:
@@ -627,7 +613,6 @@ class ClickhouseIntegrationTestsRunner:
"PASSED": [],
"FAILED": [],
"SKIPPED": [],
"FLAKY": [],
"BROKEN": [],
"NOT_FAILED": [],
}
@@ -757,11 +742,11 @@ class ClickhouseIntegrationTestsRunner:
)
log_paths.append(extras_result_path)
if len(counters["PASSED"]) + len(counters["FLAKY"]) == len(tests_in_group):
if len(counters["PASSED"]) == len(tests_in_group):
logging.info("All tests from group %s passed", test_group)
break
if (
len(counters["PASSED"]) + len(counters["FLAKY"]) >= 0
len(counters["PASSED"]) >= 0
and len(counters["FAILED"]) == 0
and len(counters["ERROR"]) == 0
):
@@ -825,7 +810,7 @@ class ClickhouseIntegrationTestsRunner:
result_state = "failure"
if not should_fail:
break
assert len(counters["FLAKY"]) == 0 or should_fail
assert should_fail
logging.info("Try is OK, all tests passed, going to clear env")
clear_ip_tables_and_restart_daemons()
logging.info("And going to sleep for some time")
@@ -835,7 +820,7 @@ class ClickhouseIntegrationTestsRunner:
time.sleep(5)
test_result = []
for state in ("ERROR", "FAILED", "PASSED", "SKIPPED", "FLAKY"):
for state in ("ERROR", "FAILED", "PASSED", "SKIPPED"):
if state == "PASSED":
text_state = "OK"
elif state == "FAILED":
@@ -928,7 +913,6 @@ class ClickhouseIntegrationTestsRunner:
"PASSED": [],
"FAILED": [],
"SKIPPED": [],
"FLAKY": [],
"BROKEN": [],
"NOT_FAILED": [],
}
@@ -988,7 +972,6 @@ class ClickhouseIntegrationTestsRunner:
"FAILED",
"PASSED",
"SKIPPED",
"FLAKY",
"BROKEN",
"NOT_FAILED",
):
@@ -1004,15 +987,14 @@ class ClickhouseIntegrationTestsRunner:
]
failed_sum = len(counters["FAILED"]) + len(counters["ERROR"])
status_text = "fail: {}, passed: {}, flaky: {}".format(
failed_sum, len(counters["PASSED"]), len(counters["FLAKY"])
status_text = "fail: {}, passed: {}".format(
failed_sum, len(counters["PASSED"])
)
if self.soft_deadline_time < time.time():
status_text = "Timeout, " + status_text
result_state = "failure"
counters["FLAKY"] = []
if not counters or sum(len(counter) for counter in counters.values()) == 0:
status_text = "No tests found for some reason! It's a bug"
result_state = "failure"