mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 23:21:59 +00:00
Trying to split tests into sequential and parallel groups
This commit is contained in:
parent
5f9ef1ebbf
commit
8e9013561d
@ -56,4 +56,4 @@ if cat /usr/bin/clickhouse-test | grep -q -- "--use-skip-list"; then
|
||||
SKIP_LIST_OPT="--use-skip-list"
|
||||
fi
|
||||
|
||||
clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||
clickhouse-test -j 4 --testname --shard --zookeeper "$SKIP_LIST_OPT" $ADDITIONAL_OPTIONS $SKIP_TESTS_OPTION 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||
|
@ -536,6 +536,9 @@ def main(args):
|
||||
else:
|
||||
args.skip = tests_to_skip_from_list
|
||||
|
||||
if args.use_skip_list and not args.sequential:
|
||||
args.sequential = collect_sequential_list(args.skip_list_path)
|
||||
|
||||
base_dir = os.path.abspath(args.queries)
|
||||
tmp_dir = os.path.abspath(args.tmp)
|
||||
|
||||
@ -649,10 +652,19 @@ def main(args):
|
||||
all_tests = [t for t in all_tests if any([re.search(r, t) for r in args.test])]
|
||||
all_tests.sort(key=key_func)
|
||||
|
||||
parallel_tests = []
|
||||
sequential_tests = []
|
||||
for test in all_tests:
|
||||
if any(s in test for s in args.sequential):
|
||||
sequential_tests.append(test)
|
||||
else:
|
||||
parallel_tests.append(test)
|
||||
|
||||
print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests")
|
||||
run_n, run_total = args.parallel.split('/')
|
||||
run_n = float(run_n)
|
||||
run_total = float(run_total)
|
||||
tests_n = len(all_tests)
|
||||
tests_n = len(parallel_tests)
|
||||
if run_total > tests_n:
|
||||
run_total = tests_n
|
||||
if run_n > run_total:
|
||||
@ -664,18 +676,20 @@ def main(args):
|
||||
if jobs > run_total:
|
||||
run_total = jobs
|
||||
|
||||
batch_size = len(all_tests) / jobs
|
||||
all_tests_array = []
|
||||
for i in range(0, len(all_tests), batch_size):
|
||||
all_tests_array.append((all_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
|
||||
batch_size = len(parallel_tests) / jobs
|
||||
parallel_tests_array = []
|
||||
for i in range(0, len(parallel_tests), batch_size):
|
||||
parallel_tests_array.append((parallel_tests[i:i+batch_size], suite, suite_dir, suite_tmp_dir, run_total))
|
||||
|
||||
if jobs > 1:
|
||||
with closing(multiprocessing.Pool(processes=jobs)) as pool:
|
||||
pool.map(run_tests_array, all_tests_array)
|
||||
else:
|
||||
run_tests_array(all_tests_array[int(run_n)-1])
|
||||
pool.map(run_tests_array, parallel_tests_array)
|
||||
|
||||
total_tests_run += tests_n
|
||||
run_tests_array((sequential_tests, suite, suite_dir, suite_tmp_dir, run_total))
|
||||
total_tests_run += len(sequential_tests) + len(parallel_tests)
|
||||
else:
|
||||
run_tests_array((all_tests, suite, suite_dir, suite_tmp_dir, run_total))
|
||||
total_tests_run += len(all_tests)
|
||||
|
||||
if args.hung_check:
|
||||
|
||||
@ -766,6 +780,20 @@ def collect_tests_to_skip(skip_list_path, build_flags):
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def collect_sequential_list(skip_list_path):
    """Load the 'sequential' test list from skip_list.json.

    Returns the value of the 'sequential' key — a list of test-name
    substrings that must not run in parallel — or an empty set when the
    file does not exist or has no such key.
    """
    if not os.path.exists(skip_list_path):
        return set()

    with open(skip_list_path, 'r') as skip_list_file:
        content = skip_list_file.read()
    # json_minify strips the /// comments, which allows skip_list.json
    # to carry inline annotations while still parsing as strict JSON.
    skip_dict = json.loads(json_minify(content))
    # NOTE(review): returns a JSON list when the key is present and a set
    # otherwise; callers only iterate and test membership, so both work.
    return skip_dict.get('sequential', set())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser=ArgumentParser(description='ClickHouse functional tests')
|
||||
parser.add_argument('-q', '--queries', help='Path to queries dir')
|
||||
@ -797,6 +825,7 @@ if __name__ == '__main__':
|
||||
parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
|
||||
parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
|
||||
parser.add_argument('--skip', nargs='+', help="Skip these tests")
|
||||
parser.add_argument('--sequential', nargs='+', help="Run these tests sequentially even if --parallel specified")
|
||||
parser.add_argument('--no-long', action='store_false', dest='no_long', help='Do not run long tests')
|
||||
parser.add_argument('--client-option', nargs='+', help='Specify additional client argument')
|
||||
parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time')
|
||||
@ -840,6 +869,9 @@ if __name__ == '__main__':
|
||||
if args.skip_list_path is None:
|
||||
args.skip_list_path = os.path.join(args.queries, 'skip_list.json')
|
||||
|
||||
if args.sequential is None:
|
||||
args.sequential = set([])
|
||||
|
||||
if args.tmp is None:
|
||||
args.tmp = args.queries
|
||||
if args.client is None:
|
||||
|
@ -102,5 +102,57 @@
|
||||
/// Internal dictionary name is different
|
||||
"01225_show_create_table_from_dictionary",
|
||||
"01224_no_superfluous_dict_reload"
|
||||
],
|
||||
"sequential":
|
||||
[
|
||||
/// Pessimistic list of tests which work badly in parallel.
|
||||
/// Probably they need better investigation.
|
||||
"01098_temporary_and_external_tables",
|
||||
"01083_expressions_in_engine_arguments",
|
||||
"00834_cancel_http_readonly_queries_on_client_close",
|
||||
"parallel_alter",
|
||||
"00417_kill_query",
|
||||
"01193_metadata_loading",
|
||||
"01294_lazy_database_concurrent",
|
||||
"01031_mutations_interpreter_and_context",
|
||||
"01305_replica_create_drop_zookeeper",
|
||||
"01092_memory_profiler",
|
||||
"01281_unsucceeded_insert_select_queries_counter",
|
||||
"00110_external_sort",
|
||||
"00682_empty_parts_merge",
|
||||
"00701_rollup",
|
||||
"00109_shard_totals_after_having",
|
||||
"ddl_dictionaries",
|
||||
"01251_dict_is_in_infinite_loop",
|
||||
"01259_dictionary_custom_settings_ddl",
|
||||
"01268_dictionary_direct_layout",
|
||||
"00652_replicated_mutations_zookeeper",
|
||||
"01238_http_memory_tracking", /// max_memory_usage_for_user can interfere with other queries running concurrently
|
||||
"01281_group_by_limit_memory_tracking", /// max_memory_usage_for_user can interfere with other queries running concurrently
|
||||
"01460_DistributedFilesToInsert",
|
||||
"live_view",
|
||||
"limit_memory",
|
||||
"memory_limit",
|
||||
"memory_leak",
|
||||
/// Bad tests for parallel run found by fasttest
|
||||
"00693_max_block_size_system_tables_columns",
|
||||
"01414_mutations_and_errors_zookeeper",
|
||||
"01013_sync_replica_timeout_zookeeper",
|
||||
"01045_zookeeper_system_mutations_with_parts_names",
|
||||
"00834_kill_mutation_replicated_zookeeper",
|
||||
"01415_sticking_mutations",
|
||||
"00933_test_fix_extra_seek_on_compressed_cache",
|
||||
"00754_alter_modify_column_partitions",
|
||||
"01471_calculate_ttl_during_merge",
|
||||
"01378_alter_rename_with_ttl_zookeeper",
|
||||
"01070_mutations_with_dependencies",
|
||||
"01070_modify_ttl",
|
||||
"01070_materialize_ttl",
|
||||
"00976_ttl_with_old_parts",
|
||||
"00933_alter_ttl",
|
||||
"01277_alter_rename_column_constraint_zookeeper",
|
||||
"00062_replicated_merge_tree_alter_zookeeper",
|
||||
"01388_clear_all_columns",
|
||||
"00699_materialized_view_mutations"
|
||||
]
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user