mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 15:12:02 +00:00

process result of func and unit tests

commit eaed15b5e1
parent 6d9990f378
@@ -61,3 +61,8 @@ fi
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"

clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt

./process_functional_tests_result.py

pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz
mv /var/log/clickhouse-server/stderr.log /test_output/
@@ -46,4 +46,5 @@ ENV NUM_TRIES=1
ENV MAX_RUN_TIME=0

COPY run.sh /
COPY process_functional_tests_result.py /
CMD ["/bin/bash", "/run.sh"]
docker/test/stateless/process_functional_tests_result.py (new executable file, 117 lines)
@@ -0,0 +1,117 @@
#!/usr/bin/env python3

import os
import logging
import argparse
import csv

OK_SIGN = "[ OK "
FAIL_SIGN = "[ FAIL "
TIMEOUT_SIGN = "[ Timeout! "
UNKNOWN_SIGN = "[ UNKNOWN "
SKIPPED_SIGN = "[ SKIPPED "
HUNG_SIGN = "Found hung queries in processlist"

def process_test_log(log_path):
    total = 0
    skipped = 0
    unknown = 0
    failed = 0
    success = 0
    hung = False
    test_results = []
    with open(log_path, 'r') as test_file:
        for line in test_file:
            line = line.strip()
            if HUNG_SIGN in line:
                hung = True
            if any(sign in line for sign in (OK_SIGN, FAIL_SIGN, UNKNOWN_SIGN, SKIPPED_SIGN)):
                test_name = line.split(' ')[2].split(':')[0]

                test_time = ''
                try:
                    time_token = line.split(']')[1].strip().split()[0]
                    float(time_token)
                    test_time = time_token
                except (IndexError, ValueError):
                    pass

                total += 1
                if TIMEOUT_SIGN in line:
                    failed += 1
                    test_results.append((test_name, "Timeout", test_time))
                elif FAIL_SIGN in line:
                    failed += 1
                    test_results.append((test_name, "FAIL", test_time))
                elif UNKNOWN_SIGN in line:
                    unknown += 1
                    test_results.append((test_name, "FAIL", test_time))
                elif SKIPPED_SIGN in line:
                    skipped += 1
                    test_results.append((test_name, "SKIPPED", test_time))
                else:
                    success += int(OK_SIGN in line)
                    test_results.append((test_name, "OK", test_time))
    return total, skipped, unknown, failed, success, hung, test_results


def process_result(result_path):
    test_results = []
    state = "success"
    description = ""
    files = os.listdir(result_path)
    if files:
        logging.info("Found files in result folder %s", ','.join(files))
        result_path = os.path.join(result_path, 'test_result.txt')
    else:
        result_path = None
        description = "No output log"
        state = "error"

    if result_path and os.path.exists(result_path):
        total, skipped, unknown, failed, success, hung, test_results = process_test_log(result_path)
        is_flaky_check = 1 < int(os.environ.get('NUM_TRIES', 1))
        # If no tests were run (success == 0), it indicates an error, e.g. the server did not start
        # or crashed immediately. But it's OK for "flaky checks" - they can contain just one test
        # which is marked as skipped.
        if failed != 0 or unknown != 0 or (success == 0 and (not is_flaky_check)):
            state = "failure"

        if hung:
            description = "Some queries hung, "
            state = "failure"
        else:
            description = ""

        description += "fail: {}, passed: {}".format(failed, success)
        if skipped != 0:
            description += ", skipped: {}".format(skipped)
        if unknown != 0:
            description += ", unknown: {}".format(unknown)
    else:
        state = "failure"
        description = "Output log doesn't exist"
        test_results = []

    return state, description, test_results


def write_results(results_file, status_file, results, status):
    with open(results_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
        out.writerows(results)
    with open(status_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
        out.writerow(status)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of functional tests")
    parser.add_argument("--in-results-dir", default='/test_output/')
    parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
    parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
    args = parser.parse_args()

    state, description, test_results = process_result(args.in_results_dir)
    logging.info("Result parsed")
    status = (state, description)
    write_results(args.out_results_file, args.out_status_file, test_results, status)
    logging.info("Result written")
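For illustration only (not part of the commit): a minimal sketch of how process_test_log behaves on ts-prefixed clickhouse-test output. The test names, timings and temp-file handling below are invented, the line format is inferred from the OK/FAIL/SKIPPED signs the parser matches on, and the snippet assumes process_test_log from the script above is in scope.

import tempfile

# Hypothetical log lines. The leading timestamp matches the ts '%Y-%m-%d %H:%M:%S'
# pipe in the test runner script above; the rest of the format is an assumption.
sample_log = (
    "2021-01-11 10:00:01 00001_select_1: [ OK ] 0.12 sec.\n"
    "2021-01-11 10:00:03 00002_example_fail: [ FAIL ] 1.05 sec.\n"
    "2021-01-11 10:00:04 00003_example_skip: [ SKIPPED ] - no long tests\n"
)

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write(sample_log)

total, skipped, unknown, failed, success, hung, results = process_test_log(tmp.name)
# Expected: total=3, failed=1, skipped=1, success=1, unknown=0, hung=False
# results: [('00001_select_1', 'OK', '0.12'),
#           ('00002_example_fail', 'FAIL', '1.05'),
#           ('00003_example_skip', 'SKIPPED', '')]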
@@ -68,5 +68,9 @@ export -f run_tests

timeout "$MAX_RUN_TIME" bash -c run_tests ||:

./process_functional_tests_result.py

pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz
mv /var/log/clickhouse-server/stderr.log /test_output/
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
@@ -86,7 +86,7 @@ if __name__ == "__main__":
    logging.info("All processes finished")
    if args.hung_check:
        logging.info("Will terminate gdb (if any)")
        res = call("killall -TERM gdb", shell=True, stderr=STDOUT)
        res = call("kill -TERM $(pidof clickhouse)", shell=True, stderr=STDOUT)
        logging.info("Checking if some queries hung")
        cmd = "{} {} {}".format(args.test_cmd, "--hung-check", "00001_select_1")
        res = call(cmd, shell=True, stderr=STDOUT)
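As a side note (not part of the diff): with a hypothetical test_cmd value, the hung-check command assembled above would look like this; the real value is whatever the runner was configured with.

# test_cmd here is a made-up example for illustration only.
test_cmd = "clickhouse-test --print-time"
cmd = "{} {} {}".format(test_cmd, "--hung-check", "00001_select_1")
# cmd == "clickhouse-test --print-time --hung-check 00001_select_1"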
@@ -5,6 +5,6 @@ ENV TZ=Europe/Moscow
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt-get install gdb

CMD service zookeeper start && sleep 7 && /usr/share/zookeeper/bin/zkCli.sh -server localhost:2181 -create create /clickhouse_test ''; \
    gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms | tee test_output/test_result.txt

COPY run.sh /
COPY process_unit_tests_result.py /
CMD ["/bin/bash", "/run.sh"]
docker/test/unit/process_unit_tests_result.py (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env python3

import os
import logging
import argparse
import csv

OK_SIGN = 'OK ]'
FAILED_SIGN = 'FAILED ]'
SEGFAULT = 'Segmentation fault'
SIGNAL = 'received signal SIG'
PASSED = 'PASSED'

def get_test_name(line):
    elements = reversed(line.split(' '))
    for element in elements:
        if '(' not in element and ')' not in element:
            return element
    raise Exception("No test name in line '{}'".format(line))


def process_result(result_folder):
    summary = []
    total_counter = 0
    failed_counter = 0
    result_log_path = '{}/test_result.txt'.format(result_folder)
    if not os.path.exists(result_log_path):
        logging.info("No output log on path %s", result_log_path)
        return "exception", "No output log", []

    status = "success"
    description = ""
    passed = False
    with open(result_log_path, 'r') as test_result:
        for line in test_result:
            if OK_SIGN in line:
                logging.info("Found ok line: '%s'", line)
                test_name = get_test_name(line.strip())
                logging.info("Test name: '%s'", test_name)
                summary.append((test_name, "OK"))
                total_counter += 1
            elif FAILED_SIGN in line and 'listed below' not in line and 'ms)' in line:
                logging.info("Found fail line: '%s'", line)
                test_name = get_test_name(line.strip())
                logging.info("Test name: '%s'", test_name)
                summary.append((test_name, "FAIL"))
                total_counter += 1
                failed_counter += 1
            elif SEGFAULT in line:
                logging.info("Found segfault line: '%s'", line)
                status = "failure"
                description += "Segmentation fault. "
                break
            elif SIGNAL in line:
                logging.info("Received signal line: '%s'", line)
                status = "failure"
                description += "Exit on signal. "
                break
            elif PASSED in line:
                logging.info("PASSED record found: '%s'", line)
                passed = True

    if not passed:
        status = "failure"
        description += "PASSED record not found. "

    if failed_counter != 0:
        status = "failure"

    if not description:
        description += "fail: {}, passed: {}".format(failed_counter, total_counter - failed_counter)

    return status, description, summary


def write_results(results_file, status_file, results, status):
    with open(results_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
        out.writerows(results)
    with open(status_file, 'w') as f:
        out = csv.writer(f, delimiter='\t')
        out.writerow(status)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    parser = argparse.ArgumentParser(description="ClickHouse script for parsing results of unit tests")
    parser.add_argument("--in-results-dir", default='/test_output/')
    parser.add_argument("--out-results-file", default='/test_output/test_results.tsv')
    parser.add_argument("--out-status-file", default='/test_output/check_status.tsv')
    args = parser.parse_args()

    state, description, test_results = process_result(args.in_results_dir)
    logging.info("Result parsed")
    status = (state, description)
    write_results(args.out_results_file, args.out_status_file, test_results, status)
    logging.info("Result written")
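A quick illustration (not part of the commit) of how get_test_name picks the test name out of a googletest-style result line; the suite and case names below are invented and the exact log format is an assumption based on the 'OK ]' / 'FAILED ]' signs above.

line = "[       OK ] ExampleSuite.ExampleCase (5 ms)"
# reversed(line.split(' ')) yields 'ms)', '(5', 'ExampleSuite.ExampleCase', ...;
# the first element without parentheses is returned.
print(get_test_name(line.strip()))  # -> ExampleSuite.ExampleCase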
docker/test/unit/run.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

set -x

service zookeeper start && sleep 7 && /usr/share/zookeeper/bin/zkCli.sh -server localhost:2181 -create create /clickhouse_test '';
gdb -q -ex 'set print inferior-events off' -ex 'set confirm off' -ex 'set print thread-events off' -ex run -ex bt -ex quit --args ./unit_tests_dbms | tee test_output/test_result.txt
./process_unit_tests_result.py
@@ -208,8 +208,8 @@ def get_processlist(args):

# collect server stacktraces using gdb
def get_stacktraces_from_gdb(server_pid):
    cmd = "gdb -batch -ex 'thread apply all backtrace' -p {}".format(server_pid)
    try:
        cmd = "gdb -batch -ex 'thread apply all backtrace' -p {}".format(server_pid)
        return subprocess.check_output(cmd, shell=True).decode('utf-8')
    except Exception as ex:
        print("Error occurred while receiving stack traces from gdb: {}".format(str(ex)))
@@ -805,6 +805,9 @@ def main(args):
    print("\nLocated ClickHouse server process {} listening at TCP port {}".format(server_pid, clickhouse_tcp_port))
    print("\nCollecting stacktraces from all running threads with gdb:")
    bt = get_stacktraces_from_gdb(server_pid)
    if len(bt) < 1000:
        print("Got suspiciously small stacktraces: ", bt)
        bt = None
    if bt is None:
        print("\nCollecting stacktraces from system.stacktraces table:")
        bt = get_stacktraces_from_clickhouse(args.client)