2021-02-20 20:04:24 +00:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
|
|
|
|
import os
|
|
|
|
import logging
|
|
|
|
import argparse
|
|
|
|
import csv
|
|
|
|
|
|
|
|
# Markers emitted by the functional-test runner, one per test-status line.
OK_SIGN = "[ OK "
FAIL_SIGN = "[ FAIL "
TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"
DATABASE_SIGN = "Database: "

SUCCESS_FINISH_SIGNS = ["All tests have finished", "No tests were run"]

RETRIES_SIGN = "Some tests were restarted"


def process_test_log(log_path):
    """Parse a functional-test runner log and aggregate per-test results.

    Args:
        log_path: path to the plain-text log produced by the test runner.

    Returns:
        A tuple ``(total, skipped, unknown, failed, success, hung,
        success_finish, retries, test_results)`` where ``test_results`` is a
        list of ``(test_name, status, time, captured_output)`` tuples and
        ``captured_output`` is the log text captured after a FAIL line,
        truncated to 4096 characters.
    """
    total = 0
    skipped = 0
    unknown = 0
    failed = 0
    success = 0
    hung = False
    retries = False
    success_finish = False
    test_results = []
    test_end = True
    with open(log_path, "r") as test_file:
        for line in test_file:
            original_line = line
            line = line.strip()

            if any(s in line for s in SUCCESS_FINISH_SIGNS):
                success_finish = True
            # Ignore hung check report, since it may be quite large.
            # (and may break python parser which has limit of 128KiB for each row).
            if HUNG_SIGN in line:
                hung = True
                break
            if RETRIES_SIGN in line:
                retries = True
            if any(
                sign in line
                for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)
            ):
                # Test name is the third space-separated token, e.g.
                # "NN/NN Case test_name: [ OK ] 1.23 sec."
                test_name = line.split(" ")[2].split(":")[0]

                test_time = ""
                try:
                    # The duration follows the closing "]" of the status sign;
                    # float() only validates it, the string form is kept.
                    time_token = line.split("]")[1].strip().split()[0]
                    float(time_token)
                    test_time = time_token
                except (IndexError, ValueError):
                    # Missing or malformed time field: keep test_time empty.
                    # (Was a bare "except:" which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    pass

                total += 1
                if TIMEOUT_SIGN in line:
                    failed += 1
                    test_results.append((test_name, "Timeout", test_time, []))
                elif FAIL_SIGN in line:
                    failed += 1
                    test_results.append((test_name, "FAIL", test_time, []))
                elif UNKNOWN_SIGN in line:
                    unknown += 1
                    # UNKNOWN is counted separately but reported as FAIL
                    # in the results table.
                    test_results.append((test_name, "FAIL", test_time, []))
                elif SKIPPED_SIGN in line:
                    skipped += 1
                    test_results.append((test_name, "SKIPPED", test_time, []))
                else:
                    success += int(OK_SIGN in line)
                    test_results.append((test_name, "OK", test_time, []))
                test_end = False
            elif (
                len(test_results) > 0 and test_results[-1][1] == "FAIL" and not test_end
            ):
                # Capture raw output lines following a FAIL until the
                # end-of-test marker.
                test_results[-1][3].append(original_line)
            # Database printed after everything else in case of failures,
            # so this is a stop marker for capturing test output.
            #
            # And it is handled after everything else to include line with database into the report.
            if DATABASE_SIGN in line:
                test_end = True

    # Collapse each captured-output list into one string, capped at 4 KiB.
    test_results = [
        (test[0], test[1], test[2], "".join(test[3])[:4096]) for test in test_results
    ]

    return (
        total,
        skipped,
        unknown,
        failed,
        success,
        hung,
        success_finish,
        retries,
        test_results,
    )
|
2021-09-30 15:44:30 +00:00
|
|
|
|
2021-02-20 20:04:24 +00:00
|
|
|
|
|
|
|
def process_result(result_path):
    """Summarize the test run whose raw output lives under *result_path*.

    Args:
        result_path: directory containing the runner output, expected to
            hold a ``test_result.txt`` log file.

    Returns:
        ``(state, description, test_results)`` where ``state`` is
        ``"success"`` or ``"failure"``, ``description`` is a human-readable
        summary, and ``test_results`` is the per-test tuple list from
        :func:`process_test_log`.
    """
    test_results = []
    state = "success"
    description = ""
    files = os.listdir(result_path)
    if files:
        logging.info("Find files in result folder %s", ",".join(files))
        result_path = os.path.join(result_path, "test_result.txt")
    else:
        # NOTE(review): the original code also set state="error" and
        # description="No output log" here, but both were unconditionally
        # overwritten by the fallback branch below, so the caller never saw
        # them. The dead stores were removed; observable behavior
        # (failure / "Output log doesn't exist") is unchanged.
        result_path = None

    if result_path and os.path.exists(result_path):
        (
            total,
            skipped,
            unknown,
            failed,
            success,
            hung,
            success_finish,
            retries,
            test_results,
        ) = process_test_log(result_path)

        is_flacky_check = 1 < int(os.environ.get("NUM_TRIES", 1))
        logging.info("Is flaky check: %s", is_flacky_check)
        # If no tests were run (success == 0) it indicates an error (e.g. server did not start or crashed immediately)
        # But it's Ok for "flaky checks" - they can contain just one test for check which is marked as skipped.
        if failed != 0 or unknown != 0 or (success == 0 and (not is_flacky_check)):
            state = "failure"

        if hung:
            description = "Some queries hung, "
            state = "failure"
            test_results.append(("Some queries hung", "FAIL", "0", ""))
        elif not success_finish:
            description = "Tests are not finished, "
            state = "failure"
            test_results.append(("Tests are not finished", "FAIL", "0", ""))
        elif retries:
            description = "Some tests restarted, "
            test_results.append(("Some tests restarted", "SKIPPED", "0", ""))
        else:
            description = ""

        description += "fail: {}, passed: {}".format(failed, success)
        if skipped != 0:
            description += ", skipped: {}".format(skipped)
        if unknown != 0:
            description += ", unknown: {}".format(unknown)
    else:
        # No log file at all (empty dir, or test_result.txt missing).
        state = "failure"
        description = "Output log doesn't exist"
        test_results = []

    return state, description, test_results
|
|
|
|
|
|
|
|
|
|
|
|
def write_results(results_file, status_file, results, status):
|
2022-03-22 16:39:58 +00:00
|
|
|
with open(results_file, "w") as f:
|
|
|
|
out = csv.writer(f, delimiter="\t")
|
2021-02-20 20:04:24 +00:00
|
|
|
out.writerows(results)
|
2022-03-22 16:39:58 +00:00
|
|
|
with open(status_file, "w") as f:
|
|
|
|
out = csv.writer(f, delimiter="\t")
|
2021-02-20 20:04:24 +00:00
|
|
|
out.writerow(status)
|
|
|
|
|
2021-02-22 14:43:06 +00:00
|
|
|
|
2021-02-20 20:04:24 +00:00
|
|
|
if __name__ == "__main__":
    # Script entry point: parse CLI options, digest the raw test output
    # directory, and emit the TSV results table plus the one-line status.
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")

    parser = argparse.ArgumentParser(
        description="ClickHouse script for parsing results of functional tests"
    )
    parser.add_argument("--in-results-dir", default="/test_output/")
    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
    args = parser.parse_args()

    state, description, test_results = process_result(args.in_results_dir)
    logging.info("Result parsed")

    write_results(
        args.out_results_file,
        args.out_status_file,
        test_results,
        (state, description),
    )
    logging.info("Result written")
|