Merge remote-tracking branch 'origin/aku/flaky-stateless' into HEAD

Alexander Kuzmenkov 2021-03-30 14:04:02 +03:00
commit f2325b62f4
3 changed files with 18 additions and 9 deletions

View File

@@ -51,6 +51,7 @@ function run_tests()
    # Skip these tests, because they fail when we rerun them multiple times
    if [ "$NUM_TRIES" -gt "1" ]; then
+       ADDITIONAL_OPTIONS+=('--order=random')
        ADDITIONAL_OPTIONS+=('--skip')
        ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
    fi
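
The idea behind '--order=random' is that reruns (NUM_TRIES > 1) should not replay the suite in the same order every time, so order-dependent flakiness gets a chance to surface. A rough sketch of how a runner could apply such options, in Python; order_tests and its arguments are illustrative, not clickhouse-test internals:

import random

def order_tests(tests, order="asc", skip=()):
    # Drop tests matching any skip pattern, then order the rest.
    remaining = [t for t in tests if not any(pattern in t for pattern in skip)]
    if order == "random":
        random.shuffle(remaining)  # a fresh order on every rerun
    else:
        remaining.sort(reverse=(order == "desc"))
    return remaining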
@@ -75,7 +76,7 @@ timeout "$MAX_RUN_TIME" bash -c run_tests ||:
./process_functional_tests_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv

-clickhouse-client -q "sytem flush logs" ||:
+clickhouse-client -q "system flush logs" ||:

pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
clickhouse-client -q "select * from system.query_log format TSVWithNamesAndTypes" | pigz > /test_output/query-log.tsv.gz &

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import shutil
import sys
import os
import os.path
@@ -112,13 +113,14 @@ def get_db_engine(args, database_name):
        return " ENGINE=" + args.db_engine
    return "" # Will use default engine

-def run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file):
+def run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file, suite_tmp_dir):
    # print(client_options)
    start_time = datetime.now()

    if args.database:
        database = args.database
        os.environ.setdefault("CLICKHOUSE_DATABASE", database)
+       os.environ.setdefault("CLICKHOUSE_TMP", suite_tmp_dir)
    else:
        # If --database is not specified, we will create temporary database with unique name
@@ -136,6 +138,12 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
            return clickhouse_proc_create, "", "Timeout creating database {} before test".format(database), total_time

        os.environ["CLICKHOUSE_DATABASE"] = database
+       # Set temporary directory to match the randomly generated database,
+       # because .sh tests also use it for temporary files and we want to avoid
+       # collisions.
+       test_tmp_dir = os.path.join(suite_tmp_dir, database)
+       os.mkdir(test_tmp_dir)
+       os.environ.setdefault("CLICKHOUSE_TMP", test_tmp_dir)

    # This is for .sh tests
    os.environ["CLICKHOUSE_LOG_COMMENT"] = case_file
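
The added block keys the temporary directory to the randomly generated database name, so concurrently running .sh tests write their temporary files into disjoint directories. A minimal sketch of that setup, with an illustrative helper name (the real logic sits inline in run_single_test):

import os

def prepare_test_tmp(suite_tmp_dir, database):
    # One directory per test, named after its unique database.
    test_tmp_dir = os.path.join(suite_tmp_dir, database)
    os.mkdir(test_tmp_dir)  # the database name is unique, so this cannot clash
    os.environ.setdefault("CLICKHOUSE_TMP", test_tmp_dir)  # picked up by .sh tests
    return test_tmp_dir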
@@ -185,6 +193,8 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
            total_time = (datetime.now() - start_time).total_seconds()
            return clickhouse_proc_create, "", "Timeout dropping database {} after test".format(database), total_time

+       shutil.rmtree(test_tmp_dir)

    total_time = (datetime.now() - start_time).total_seconds()

    # Normalize randomized database names in stdout, stderr files.
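
After the test, the per-test directory is removed and the captured output is normalized, since the database name differs on every run and would otherwise never match the reference output. A hedged sketch of such a normalization step; normalize_output is a hypothetical helper, not part of clickhouse-test:

import os

def normalize_output(path, database, placeholder="default"):
    # Replace the random database name with a stable placeholder in a
    # captured .stdout or .stderr file.
    if not os.path.exists(path):
        return
    with open(path) as f:
        text = f.read()
    with open(path, "w") as f:
        f.write(text.replace(database, placeholder))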
@@ -369,7 +379,7 @@ def run_tests_array(all_tests_with_params):
            stdout_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stdout'
            stderr_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stderr'

-           proc, stdout, stderr, total_time = run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)
+           proc, stdout, stderr, total_time = run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file, suite_tmp_dir)

            if proc.returncode is None:
                try:
@@ -387,7 +397,7 @@ def run_tests_array(all_tests_with_params):
                else:
                    counter = 1
                    while proc.returncode != 0 and need_retry(stderr):
-                       proc, stdout, stderr, total_time = run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)
+                       proc, stdout, stderr, total_time = run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file, suite_tmp_dir)
                        sleep(2**counter)
                        counter += 1
                        if counter > 6:
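
The loop above reruns a test whose stderr indicates a retriable failure, sleeping longer after each attempt and capping the number of retries. The same pattern in isolation (a sketch: run_once and needs_retry stand in for run_single_test and need_retry):

from time import sleep

def run_with_retries(run_once, needs_retry, max_retries=6):
    proc = run_once()
    counter = 1
    while proc.returncode != 0 and needs_retry(proc):
        proc = run_once()    # rerun the same test
        sleep(2 ** counter)  # back off: 2s, 4s, 8s, ...
        counter += 1
        if counter > max_retries:
            break            # give up and report the last result
    return proc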
@@ -657,7 +667,6 @@ def main(args):
    os.environ.setdefault("CLICKHOUSE_CONFIG", args.configserver)
    if args.configclient:
        os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient)
-   os.environ.setdefault("CLICKHOUSE_TMP", tmp_dir)

    # Force to print server warnings in stderr
    # Shell scripts could change logging level

View File

@@ -21,7 +21,7 @@ system flush logs;
select count()
from system.query_log
where
-    query like '%01547_query_log_current_database%'
+    query like 'select \'01547_query_log_current_database%'
    and current_database = currentDatabase()
    and event_date >= yesterday();
@@ -30,7 +30,6 @@ where
select count() == 2
from system.query_thread_log
where
-    query like '%01547_query_log_current_database%'
+    query like 'select \'01547_query_log_current_database%'
    and current_database = currentDatabase()
-    and event_date = today()
-    and event_time >= now() - interval 1 minute;
+    and event_date >= yesterday()
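
Both hunks replace the loose LIKE pattern with one anchored to the start of the test's own statements. One way an unanchored '%...%' pattern can over-count is by matching the checking query itself, since its text also contains the test name; a toy illustration in Python, with LIKE approximated by substring and prefix checks:

logged_queries = [
    "select '01547_query_log_current_database', 1",
    "select count() from system.query_log where query like '%01547_query_log_current_database%'",
]

# Unanchored pattern: substring match anywhere in the query text.
loose = [q for q in logged_queries if "01547_query_log_current_database" in q]
# Anchored pattern: the query must start with the test's own select.
anchored = [q for q in logged_queries
            if q.startswith("select '01547_query_log_current_database")]

print(len(loose))     # 2 -- the check query matches itself
print(len(anchored))  # 1 -- only the test's own query matches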