#!/usr/bin/env python3
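
# Functional test runner for ClickHouse: it discovers test cases (.sql, .sh,
# .py, .expect, plus jinja2 ".sql.j2" templates) under the queries directory,
# runs them through clickhouse-client, and diffs their output against the
# corresponding .reference files.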
import shutil
import sys
import os
import os.path
import signal
import re
import json
import copy
import traceback

from argparse import ArgumentParser
from typing import Tuple, Union, Optional, TextIO
import shlex
import subprocess
from subprocess import Popen
from subprocess import PIPE
from subprocess import CalledProcessError
from subprocess import TimeoutExpired
from datetime import datetime
from time import time, sleep
from errno import ESRCH

try:
    import termcolor
except ImportError:
    termcolor = None

import random
import string
import multiprocessing
import socket
from contextlib import closing

USE_JINJA = True

try:
    import jinja2
except ImportError:
    USE_JINJA = False
    print('WARNING: jinja2 not installed! Template tests will be skipped.')


DISTRIBUTED_DDL_TIMEOUT_MSG = "is executing longer than distributed_ddl_task_timeout"
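
# If a test's output contains one of these messages, the failure is treated as
# transient (ZooKeeper hiccups, connection loss, timeouts) and the test is
# retried up to MAX_RETRIES times; see need_retry() and run_tests_array().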
MESSAGES_TO_RETRY = [
    "DB::Exception: ZooKeeper session has been expired",
    "DB::Exception: Connection loss",
    "Coordination::Exception: Session expired",
    "Coordination::Exception: Connection loss",
    "Coordination::Exception: Operation timeout",
    "DB::Exception: Operation timeout",
    "Operation timed out",
    "ConnectionPoolWithFailover: Connection failed at try",
    "DB::Exception: New table appeared in database being dropped or detached. Try again",
    "is already started to be removing by another replica right now",
    "Shutdown is called for table",  # It happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized.
    DISTRIBUTED_DDL_TIMEOUT_MSG  # FIXME
]

MAX_RETRIES = 3

TEST_FILE_EXTENSIONS = ['.sql', '.sql.j2', '.sh', '.py', '.expect']


class Terminated(KeyboardInterrupt):
    pass


def signal_handler(sig, frame):
    raise Terminated(f'Terminated with {sig} signal')


def stop_tests():
    global stop_tests_triggered_lock
    global stop_tests_triggered

    with stop_tests_triggered_lock:
        if not stop_tests_triggered.is_set():
            stop_tests_triggered.set()

            # send signal to all processes in group to avoid hung check triggering
            # (to avoid terminating clickhouse-test itself, the signal should be ignored)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)


def json_minify(string):
    """
    Removes all js-style comments from a JSON string, which allows comments
    to be kept in skip_list.json.

    The code was taken from https://github.com/getify/JSON.minify/tree/python under the MIT license.
    """

    tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r')
    end_slashes_re = re.compile(r'(\\)*$')

    in_string = False
    in_multi = False
    in_single = False

    new_str = []
    index = 0

    for match in re.finditer(tokenizer, string):
        if not (in_multi or in_single):
            tmp = string[index:match.start()]
            new_str.append(tmp)
        else:
            # Replace comments with white space so that the JSON parser reports
            # the correct column numbers on parsing errors.
            new_str.append(' ' * (match.start() - index))

        index = match.end()
        val = match.group()

        if val == '"' and not (in_multi or in_single):
            escaped = end_slashes_re.search(string, 0, match.start())

            # start of string or unescaped quote character to end string
            if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):  # noqa
                in_string = not in_string
            index -= 1  # include " character in next catch
        elif not (in_string or in_multi or in_single):
            if val == '/*':
                in_multi = True
            elif val == '//':
                in_single = True
        elif val == '*/' and in_multi and not (in_string or in_single):
            in_multi = False
            new_str.append(' ' * len(val))
        elif val in '\r\n' and not (in_multi or in_string) and in_single:
            in_single = False
        elif not (in_multi or in_single):  # noqa
            new_str.append(val)

        if val in '\r\n':
            new_str.append(val)
        elif in_multi or in_single:
            new_str.append(' ' * len(val))

    new_str.append(string[index:])

    return ''.join(new_str)
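
# Example: in json_minify('{"a": 1 /* note */}') the ten comment characters
# come back as ten spaces, so json.loads() error positions still line up with
# the original file.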


def remove_control_characters(s):
    """
    https://github.com/html5lib/html5lib-python/issues/96#issuecomment-43438438
    """
    def str_to_int(s, default, base=10):
        if int(s, base) < 0x10000:
            return chr(int(s, base))
        return default

    s = re.sub(r"&#(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
    s = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
    s = re.sub(r"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", s)
    return s


def get_db_engine(args, database_name):
    if args.replicated_database:
        return f" ON CLUSTER test_cluster_database_replicated \
            ENGINE=Replicated('/test/clickhouse/db/{database_name}', \
            '{{shard}}', '{{replica}}')"
    if args.db_engine:
        return " ENGINE=" + args.db_engine
    return ""  # Will use default engine
def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file):
    testcase_args = copy.deepcopy(args)

    testcase_args.testcase_start_time = datetime.now()
    testcase_basename = os.path.basename(case_file)
    testcase_args.testcase_client = f"{testcase_args.client} --log_comment='{testcase_basename}'"

    if testcase_args.database:
        database = testcase_args.database
        os.environ.setdefault("CLICKHOUSE_DATABASE", database)
        os.environ.setdefault("CLICKHOUSE_TMP", suite_tmp_dir)
    else:
        # If --database is not specified, we will create a temporary database
        # with a unique name, and we will recreate and drop it for each test.
        def random_str(length=6):
            alphabet = string.ascii_lowercase + string.digits
            return ''.join(random.choice(alphabet) for _ in range(length))
        database = 'test_{suffix}'.format(suffix=random_str())

        with open(stderr_file, 'w') as stderr:
            client_cmd = testcase_args.testcase_client + " " \
                + get_additional_client_options(args)

            clickhouse_proc_create = open_client_process(
                universal_newlines=True,
                client_args=client_cmd,
                stderr_file=stderr)

            try:
                clickhouse_proc_create.communicate(("CREATE DATABASE " + database + get_db_engine(testcase_args, database)), timeout=testcase_args.timeout)
            except TimeoutExpired:
                total_time = (datetime.now() - testcase_args.testcase_start_time).total_seconds()
                return clickhouse_proc_create, "", "Timeout creating database {} before test".format(database), total_time

        os.environ["CLICKHOUSE_DATABASE"] = database
        # Set temporary directory to match the randomly generated database,
        # because .sh tests also use it for temporary files and we want to avoid
        # collisions.
        testcase_args.test_tmp_dir = os.path.join(suite_tmp_dir, database)
        os.mkdir(testcase_args.test_tmp_dir)
        os.environ.setdefault("CLICKHOUSE_TMP", testcase_args.test_tmp_dir)

    testcase_args.testcase_database = database

    return testcase_args
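
# run_single_test() executes one case: for .sql it pipes the file into the
# client in --multiquery mode, for scripts it runs the file itself; it then
# enforces args.timeout, drops the per-test database, and normalizes
# randomized database names and the hostname in the captured output.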
def run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file):
    client = args.testcase_client
    start_time = args.testcase_start_time
    database = args.testcase_database

    # This is for .sh tests
    os.environ["CLICKHOUSE_LOG_COMMENT"] = case_file

    params = {
        'client': client + ' --database=' + database,
        'logs_level': server_logs_level,
        'options': client_options,
        'test': case_file,
        'stdout': stdout_file,
        'stderr': stderr_file,
    }

    # >> append to stderr (but not stdout, since it is not used there),
    # because there is also the output of the per-test database creation
    if not args.database:
        pattern = '{test} > {stdout} 2>> {stderr}'
    else:
        pattern = '{test} > {stdout} 2> {stderr}'

    if ext == '.sql':
        pattern = "{client} --send_logs_level={logs_level} --testmode --multiquery {options} < " + pattern

    command = pattern.format(**params)

    proc = Popen(command, shell=True, env=os.environ)

    while (datetime.now() - start_time).total_seconds() < args.timeout and proc.poll() is None:
        sleep(0.01)

    need_drop_database = not args.database
    if need_drop_database and args.no_drop_if_fail:
        maybe_passed = (proc.returncode == 0) and (proc.stderr is None) and (proc.stdout is None or 'Exception' not in proc.stdout)
        need_drop_database = not maybe_passed

    if need_drop_database:
        with open(stderr_file, 'a') as stderr:
            clickhouse_proc_create = open_client_process(client, universal_newlines=True, stderr_file=stderr)

        seconds_left = max(args.timeout - (datetime.now() - start_time).total_seconds(), 20)

        try:
            drop_database_query = "DROP DATABASE " + database
            if args.replicated_database:
                drop_database_query += " ON CLUSTER test_cluster_database_replicated"
            clickhouse_proc_create.communicate((drop_database_query), timeout=seconds_left)
        except TimeoutExpired:
            # also kill the test process, because it can hang as well
            if proc.returncode is None:
                try:
                    proc.kill()
                except OSError as e:
                    if e.errno != ESRCH:
                        raise

            total_time = (datetime.now() - start_time).total_seconds()
            return clickhouse_proc_create, "", f"Timeout dropping database {database} after test", total_time

        shutil.rmtree(args.test_tmp_dir)

    total_time = (datetime.now() - start_time).total_seconds()

    # Normalize randomized database names in stdout, stderr files.
    os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stdout_file))
    if args.hide_db_name:
        os.system("LC_ALL=C sed -i -e 's/{test_db}/default/g' {file}".format(test_db=database, file=stderr_file))
    if args.replicated_database:
        os.system("LC_ALL=C sed -i -e 's|/auto_{{shard}}||g' {file}".format(file=stdout_file))
        os.system("LC_ALL=C sed -i -e 's|auto_{{replica}}||g' {file}".format(file=stdout_file))

    # Normalize hostname in stdout file.
    os.system("LC_ALL=C sed -i -e 's/{hostname}/localhost/g' {file}".format(hostname=socket.gethostname(), file=stdout_file))

    stdout = open(stdout_file, 'rb').read() if os.path.exists(stdout_file) else b''
    stdout = str(stdout, errors='replace', encoding='utf-8')
    stderr = open(stderr_file, 'rb').read() if os.path.exists(stderr_file) else b''
    stderr = str(stderr, errors='replace', encoding='utf-8')

    return proc, stdout, stderr, total_time


def need_retry(stdout, stderr):
    return any(msg in stdout for msg in MESSAGES_TO_RETRY) or any(msg in stderr for msg in MESSAGES_TO_RETRY)
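
# get_processlist() returns a (check_timed_out, processlist_text) pair; it is
# used by the hung-queries check at the end of main().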
def get_processlist(args):
    try:
        query = b"SHOW PROCESSLIST FORMAT Vertical"

        if args.replicated_database:
            query = b"SELECT materialize((hostName(), tcpPort())) as host, * " \
                    b"FROM clusterAllReplicas('test_cluster_database_replicated', system.processes) " \
                    b"WHERE query NOT LIKE '%system.processes%' FORMAT Vertical"

        clickhouse_proc = open_client_process(args.client)

        (stdout, _) = clickhouse_proc.communicate((query), timeout=20)

        return False, stdout.decode('utf-8')
    except Exception as ex:
        print("Exception", ex)
        return True, ""


# collect server stacktraces using gdb
def get_stacktraces_from_gdb(server_pid):
    try:
        cmd = f"gdb -batch -ex 'thread apply all backtrace' -p {server_pid}"
        return subprocess.check_output(cmd, shell=True).decode('utf-8')
    except Exception as e:
        print(f"Error occurred while receiving stack traces from gdb: {e}")
    return None


# collect server stacktraces from system.stack_trace table
# it does not work in Sandbox
def get_stacktraces_from_clickhouse(client, replicated_database=False):
    replicated_msg = \
        "{} --allow_introspection_functions=1 --skip_unavailable_shards=1 --query \
        \"SELECT materialize((hostName(), tcpPort())) as host, thread_id, \
        arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), \
        arrayMap(x -> addressToLine(x), trace), \
        arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace \
        FROM clusterAllReplicas('test_cluster_database_replicated', 'system.stack_trace') \
        ORDER BY host, thread_id FORMAT Vertical\"".format(client)

    msg = \
        "{} --allow_introspection_functions=1 --query \
        \"SELECT arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), \
        arrayMap(x -> addressToLine(x), trace), \
        arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace \
        FROM system.stack_trace FORMAT Vertical\"".format(client)

    try:
        return subprocess.check_output(
            replicated_msg if replicated_database else msg,
            shell=True, stderr=subprocess.STDOUT).decode('utf-8')
    except Exception as e:
        print(f"Error occurred while receiving stack traces from client: {e}")
    return None


def print_stacktraces() -> None:
    server_pid = get_server_pid()

    bt = None

    if server_pid and not args.replicated_database:
        print("")
        print(f"Located ClickHouse server process {server_pid} listening at TCP port {args.tcp_port}")
        print("Collecting stacktraces from all running threads with gdb:")

        bt = get_stacktraces_from_gdb(server_pid)

        if bt is None or len(bt) < 1000:
            print("Got suspiciously small stacktraces: ", bt)
            bt = None

    if bt is None:
        print("\nCollecting stacktraces from system.stack_trace table:")

        bt = get_stacktraces_from_clickhouse(
            args.client, args.replicated_database)

    if bt is not None:
        print(bt)
        return

    print(colored(
        f"\nUnable to locate ClickHouse server process listening at TCP port {args.tcp_port}. "
        "It must have crashed or exited prematurely!",
        args, "red", attrs=["bold"]))


def get_server_pid():
    # lsof does not work in stress tests for some reason
    cmd_lsof = f"lsof -i tcp:{args.tcp_port} -s tcp:LISTEN -Fp | awk '/^p[0-9]+$/{{print substr($0, 2)}}'"
    cmd_pidof = "pidof -s clickhouse-server"

    commands = [cmd_lsof, cmd_pidof]
    output = None

    for cmd in commands:
        try:
            output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True)
            if output:
                return int(output)
        except Exception as e:
            print(f"Cannot get server pid with {cmd}, got {output}: {e}")

    return None  # most likely server is dead


def colored(text, args, color=None, on_color=None, attrs=None):
    if termcolor and (sys.stdout.isatty() or args.force_color):
        return termcolor.colored(text, color, on_color, attrs)
    else:
        return text


stop_time = None
exit_code = multiprocessing.Value("i", 0)
server_died = multiprocessing.Event()
stop_tests_triggered_lock = multiprocessing.Lock()
stop_tests_triggered = multiprocessing.Event()
queue = multiprocessing.Queue(maxsize=1)
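
# NOTE: maxsize=1 makes queue.put() in do_run_tests() block until a worker is
# ready to take the next test, so tests are handed out one at a time rather
# than batched up front (an assumption about the intent, not stated in code).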


def print_test_time(test_time) -> str:
    if args.print_time:
        return " {0:.2f} sec.".format(test_time)
    else:
        return ''


def should_skip_test_by_name(name: str, test_ext: str) -> Tuple[bool, str]:
    if args.skip and any(s in name for s in args.skip):
        return True, "skip"

    if not args.zookeeper and ('zookeeper' in name or 'replica' in name):
        return True, "no zookeeper"

    if not args.shard and \
            ('shard' in name or 'distributed' in name or 'global' in name):
        return True, "no shard"

    # Tests for races and deadlocks usually are run in a loop for a significant
    # amount of time
    if args.no_long and \
            ('long' in name or 'deadlock' in name or 'race' in name):
        return True, "no long"

    if not USE_JINJA and test_ext.endswith("j2"):
        return True, "no jinja"

    return False, ""


def should_skip_disabled_test(name: str, suite_dir: str) -> Tuple[bool, str]:
    disabled_file = os.path.join(suite_dir, name) + '.disabled'

    if os.path.exists(disabled_file) and not args.disabled:
        return True, open(disabled_file, 'r').read()

    return False, ""


# should skip test, should increment skipped_total, skip reason
def should_skip_test(name: str, test_ext: str, suite_dir: str) -> Tuple[bool, bool, str]:
    should_skip, skip_reason = should_skip_test_by_name(name, test_ext)

    if should_skip:
        return True, True, skip_reason

    should_skip, skip_reason = should_skip_disabled_test(name, suite_dir)

    return should_skip, False, skip_reason
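
# send_test_name_failed() runs a marker query ("Running test <suite>/<case>
# from pid=...") so that the test name appears in the server log; a True
# result means the server did not accept it, which the caller treats as a
# failed health check.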
def send_test_name_failed(suite: str, case: str) -> bool:
    clickhouse_proc = open_client_process(args.client, universal_newlines=True)

    failed_to_check = False

    pid = os.getpid()
    query = f"SELECT 'Running test {suite}/{case} from pid={pid}';"

    try:
        clickhouse_proc.communicate((query), timeout=20)
    except:
        failed_to_check = True

    return failed_to_check or clickhouse_proc.returncode != 0
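
# restarted_tests collects (test, stderr) pairs for the retry report printed at
# the end of main(); run_tests_array() below is the worker body for both modes:
# sequential runs pop tests from the list in the main process, while parallel
# pool workers pull test names from the shared queue until they receive None.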
restarted_tests = []  # (test, stderr)

# def run_tests_array(all_tests, suite, suite_dir, suite_tmp_dir, run_total):
def run_tests_array(all_tests_with_params):
    all_tests, num_tests, suite, suite_dir, suite_tmp_dir = all_tests_with_params
    global stop_time
    global exit_code
    global server_died

    OP_SQUARE_BRACKET = colored("[", args, attrs=['bold'])
    CL_SQUARE_BRACKET = colored("]", args, attrs=['bold'])

    MSG_FAIL = OP_SQUARE_BRACKET + colored(" FAIL ", args, "red", attrs=['bold']) + CL_SQUARE_BRACKET
    MSG_UNKNOWN = OP_SQUARE_BRACKET + colored(" UNKNOWN ", args, "yellow", attrs=['bold']) + CL_SQUARE_BRACKET
    MSG_OK = OP_SQUARE_BRACKET + colored(" OK ", args, "green", attrs=['bold']) + CL_SQUARE_BRACKET
    MSG_SKIPPED = OP_SQUARE_BRACKET + colored(" SKIPPED ", args, "cyan", attrs=['bold']) + CL_SQUARE_BRACKET

    passed_total = 0
    skipped_total = 0
    failures_total = 0
    failures = 0
    failures_chain = 0
    start_time = datetime.now()

    is_concurrent = multiprocessing.current_process().name != "MainProcess"

    client_options = get_additional_client_options(args)

    if num_tests > 0:
        about = 'about ' if is_concurrent else ''
        proc_name = multiprocessing.current_process().name
        print(f"\nRunning {about}{num_tests} {suite} tests ({proc_name}).\n")

    while True:
        if is_concurrent:
            case = queue.get()
            if not case:
                break
        else:
            if all_tests:
                case = all_tests.pop(0)
            else:
                break

        if server_died.is_set():
            stop_tests()
            break

        if stop_time and time() > stop_time:
            print("\nStop tests run because global time limit is exceeded.\n")
            stop_tests()
            break

        case_file = os.path.join(suite_dir, case)
        (name, ext) = os.path.splitext(case)

        try:
            status = ''
            if not is_concurrent:
                sys.stdout.flush()
                sys.stdout.write("{0:72}".format(removesuffix(name, ".gen", ".sql") + ": "))
                # This flush is needed so you can see the test name of the long
                # running test before it will finish. But don't do it in parallel
                # mode, so that the lines don't mix.
                sys.stdout.flush()
            else:
                status = "{0:72}".format(removesuffix(name, ".gen", ".sql") + ": ")

            skip_test, increment_skip_count, skip_reason = \
                should_skip_test(name, ext, suite_dir)

            if skip_test:
                status += MSG_SKIPPED + f" - {skip_reason}\n"

                if increment_skip_count:
                    skipped_total += 1
            else:
                if args.testname and send_test_name_failed(suite, case):
                    failures += 1
                    print("Server does not respond to health check")
                    server_died.set()
                    stop_tests()
                    break

                file_suffix = ('.' + str(os.getpid())) if is_concurrent and args.test_runs > 1 else ''
                reference_file = get_reference_file(suite_dir, name)
                stdout_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stdout'
                stderr_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stderr'

                testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file)
                proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)

                if proc.returncode is None:
                    try:
                        proc.kill()
                    except OSError as e:
                        if e.errno != ESRCH:
                            raise

                    failures += 1
                    status += MSG_FAIL
                    status += print_test_time(total_time)
                    status += " - Timeout!\n"
                    if stderr:
                        status += stderr
                    status += 'Database: ' + testcase_args.testcase_database
                else:
                    counter = 1
                    while need_retry(stdout, stderr):
                        restarted_tests.append((case_file, stderr))
                        testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file)
                        proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)
                        sleep(2**counter)
                        counter += 1
                        if MAX_RETRIES < counter:
                            if args.replicated_database:
                                if DISTRIBUTED_DDL_TIMEOUT_MSG in stderr:
                                    server_died.set()
                            break

                    if proc.returncode != 0:
                        failures += 1
                        failures_chain += 1
                        status += MSG_FAIL
                        status += print_test_time(total_time)
                        status += ' - return code {}\n'.format(proc.returncode)

                        if stderr:
                            status += stderr

                        # Stop on fatal errors like segmentation fault. They are sent to client via logs.
                        if ' <Fatal> ' in stderr:
                            server_died.set()

                        if testcase_args.stop \
                                and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) \
                                and 'Received exception from server' not in stderr:
                            server_died.set()

                        if os.path.isfile(stdout_file):
                            status += ", result:\n\n"
                            status += '\n'.join(
                                open(stdout_file).read().split('\n')[:100])
                            status += '\n'

                        status += 'Database: ' + testcase_args.testcase_database

                    elif stderr:
                        failures += 1
                        failures_chain += 1
                        status += MSG_FAIL
                        status += print_test_time(total_time)
                        status += " - having stderr:\n{}\n".format(
                            '\n'.join(stderr.split('\n')[:100]))
                        status += 'Database: ' + testcase_args.testcase_database
                    elif 'Exception' in stdout:
                        failures += 1
                        failures_chain += 1
                        status += MSG_FAIL
                        status += print_test_time(total_time)
                        status += " - having exception in stdout:\n{}\n".format(
                            '\n'.join(stdout.split('\n')[:100]))
                        status += 'Database: ' + testcase_args.testcase_database
                    elif '@@SKIP@@' in stdout:
                        skipped_total += 1
                        skip_reason = stdout.replace('@@SKIP@@', '').rstrip("\n")
                        status += MSG_SKIPPED + f" - {skip_reason}\n"
                    elif reference_file is None:
                        status += MSG_UNKNOWN
                        status += print_test_time(total_time)
                        status += " - no reference file\n"
                        status += 'Database: ' + testcase_args.testcase_database
                    else:
                        result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout=PIPE)

                        if result_is_different:
                            diff = Popen(['diff', '-U', str(testcase_args.unified), reference_file, stdout_file], stdout=PIPE, universal_newlines=True).communicate()[0]
                            failures += 1
                            status += MSG_FAIL
                            status += print_test_time(total_time)
                            status += " - result differs with reference:\n{}\n".format(diff)
                            status += 'Database: ' + testcase_args.testcase_database
                        else:
                            if testcase_args.test_runs > 1 and total_time > 60 and 'long' not in name:
                                # We're in Flaky Check mode, check the run time as well while we're at it.
                                failures += 1
                                failures_chain += 1
                                status += MSG_FAIL
                                status += print_test_time(total_time)
                                status += " - Test runs too long (> 60s). Make it faster.\n"
                                status += 'Database: ' + testcase_args.testcase_database
                            else:
                                passed_total += 1
                                failures_chain = 0
                                status += MSG_OK
                                status += print_test_time(total_time)
                                status += "\n"
                                if os.path.exists(stdout_file):
                                    os.remove(stdout_file)
                                if os.path.exists(stderr_file):
                                    os.remove(stderr_file)

            if status and not status.endswith('\n'):
                status += '\n'

            sys.stdout.write(status)
            sys.stdout.flush()
        except KeyboardInterrupt as e:
            print(colored("Break tests execution", args, "red"))
            stop_tests()
            raise e
        except:
            exc_type, exc_value, tb = sys.exc_info()
            failures += 1

            exc_name = exc_type.__name__
            traceback_str = "\n".join(traceback.format_tb(tb, 10))

            print(f"{MSG_FAIL} - Test internal error: {exc_name}")
            print(f"{exc_value}\n{traceback_str}")

        if failures_chain >= 20:
            stop_tests()
            break

    failures_total = failures_total + failures

    if failures_total > 0:
        print(colored(f"\nHaving {failures_total} errors! {passed_total} tests passed."
                      f" {skipped_total} tests skipped. {(datetime.now() - start_time).total_seconds():.2f} s elapsed"
                      f' ({multiprocessing.current_process().name}).',
                      args, "red", attrs=["bold"]))
        exit_code.value = 1
    else:
        print(colored(f"\n{passed_total} tests passed. {skipped_total} tests skipped."
                      f" {(datetime.now() - start_time).total_seconds():.2f} s elapsed"
                      f' ({multiprocessing.current_process().name}).',
                      args, "green", attrs=["bold"]))

    sys.stdout.flush()


server_logs_level = "warning"


def check_server_started(client, retry_count):
    print("Connecting to ClickHouse server...", end='')

    sys.stdout.flush()

    while retry_count > 0:
        clickhouse_proc = open_client_process(client)
        (stdout, stderr) = clickhouse_proc.communicate(b"SELECT 1")

        if clickhouse_proc.returncode == 0 and stdout.startswith(b"1"):
            print(" OK")
            sys.stdout.flush()
            return True

        if clickhouse_proc.returncode == 210:
            # Connection refused, retry
            print('.', end='')
            sys.stdout.flush()
            retry_count -= 1
            sleep(0.5)
            continue

        # FIXME Some old comment, maybe now CH supports Python3 ?
        # We can't print this, because for some reason this is python 2,
        # and args appeared in 3.3. To hell with it.
        # print(''.join(clickhouse_proc.args))

        # Other kind of error, fail.

        code: int = clickhouse_proc.returncode

        print(f"\nClient invocation failed with code {code}:\n\
                stdout: {stdout}\n\
                stderr: {stderr}")

        sys.stdout.flush()

        return False

    print('\nAll connection tries failed')
    sys.stdout.flush()

    return False


class BuildFlags():
    THREAD = 'thread-sanitizer'
    ADDRESS = 'address-sanitizer'
    UNDEFINED = 'ub-sanitizer'
    MEMORY = 'memory-sanitizer'
    DEBUG = 'debug-build'
    UNBUNDLED = 'unbundled-build'
    RELEASE = 'release-build'
    DATABASE_ORDINARY = 'database-ordinary'
    POLYMORPHIC_PARTS = 'polymorphic-parts'
    DATABASE_REPLICATED = 'database-replicated'
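
# collect_build_flags() probes system.build_options and related system tables
# to detect how the server was built (sanitizer, debug/release, unbundled,
# default database engine, polymorphic parts). The returned flags select the
# matching sections of skip_list.json in collect_tests_to_skip().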
def collect_build_flags(client):
    clickhouse_proc = open_client_process(client)
    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
    result = []

    if clickhouse_proc.returncode == 0:
        if b'-fsanitize=thread' in stdout:
            result.append(BuildFlags.THREAD)
        elif b'-fsanitize=address' in stdout:
            result.append(BuildFlags.ADDRESS)
        elif b'-fsanitize=undefined' in stdout:
            result.append(BuildFlags.UNDEFINED)
        elif b'-fsanitize=memory' in stdout:
            result.append(BuildFlags.MEMORY)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    clickhouse_proc = open_client_process(client)
    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'")

    if clickhouse_proc.returncode == 0:
        if b'Debug' in stdout:
            result.append(BuildFlags.DEBUG)
        elif b'RelWithDebInfo' in stdout or b'Release' in stdout:
            result.append(BuildFlags.RELEASE)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    clickhouse_proc = open_client_process(client)
    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'")

    if clickhouse_proc.returncode == 0:
        if b'ON' in stdout or b'1' in stdout:
            result.append(BuildFlags.UNBUNDLED)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    clickhouse_proc = open_client_process(client)
    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.settings WHERE name = 'default_database_engine'")

    if clickhouse_proc.returncode == 0:
        if b'Ordinary' in stdout:
            result.append(BuildFlags.DATABASE_ORDINARY)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    clickhouse_proc = open_client_process(client)
    (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'")

    if clickhouse_proc.returncode == 0:
        if stdout == b'0\n':
            result.append(BuildFlags.POLYMORPHIC_PARTS)
    else:
        raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))

    return result
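
# Suites and tests are named with a numeric prefix ('00123_...'); the key
# functions below sort by that prefix, fall back to large sentinel keys for
# names without one, and shuffle when --order=random.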
def suite_key_func(item: str) -> Union[int, Tuple[int, str]]:
    if args.order == 'random':
        return random.random()

    if -1 == item.find('_'):
        return 99998, ''

    prefix, suffix = item.split('_', 1)

    try:
        return int(prefix), suffix
    except ValueError:
        return 99997, ''


def tests_in_suite_key_func(item: str) -> int:
    if args.order == 'random':
        return random.random()

    reverse = 1 if args.order == 'asc' else -1

    if -1 == item.find('_'):
        return 99998

    prefix, _ = item.split('_', 1)

    try:
        return reverse * int(prefix)
    except ValueError:
        return 99997


def extract_key(key: str) -> str:
    return subprocess.getstatusoutput(
        args.extract_from_config +
        " --try --config " +
        args.configserver + key)[1]


def open_client_process(
        client_args: str,
        universal_newlines: bool = False,
        stderr_file: Optional[TextIO] = None):
    return Popen(
        shlex.split(client_args), stdin=PIPE, stdout=PIPE,
        stderr=stderr_file if stderr_file is not None else PIPE,
        universal_newlines=True if universal_newlines else None)
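
# do_run_tests() schedules one suite: parallel-safe tests are fed through the
# shared queue to a multiprocessing.Pool, then the tests from the sequential
# list run in the main process. Returns the number of tests scheduled.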
def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_tests, sequential_tests, parallel):
    if jobs > 1 and len(parallel_tests) > 0:
        print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests")
        run_n, run_total = parallel.split('/')
        run_n = float(run_n)
        run_total = float(run_total)
        tests_n = len(parallel_tests)
        if run_total > tests_n:
            run_total = tests_n

        if jobs > tests_n:
            jobs = tests_n
        if jobs > run_total:
            run_total = jobs

        batch_size = max(1, len(parallel_tests) // jobs)
        parallel_tests_array = []
        for _ in range(jobs):
            parallel_tests_array.append((None, batch_size, suite, suite_dir, suite_tmp_dir))

        with closing(multiprocessing.Pool(processes=jobs)) as pool:
            pool.map_async(run_tests_array, parallel_tests_array)

            for suit in parallel_tests:
                queue.put(suit)

            for _ in range(jobs):
                queue.put(None)

            queue.close()

        pool.join()

        run_tests_array((sequential_tests, len(sequential_tests), suite, suite_dir, suite_tmp_dir))
        return len(sequential_tests) + len(parallel_tests)
    else:
        num_tests = len(all_tests)
        run_tests_array((all_tests, num_tests, suite, suite_dir, suite_tmp_dir))
        return num_tests


def is_test_from_dir(suite_dir, case):
    case_file = os.path.join(suite_dir, case)
    # We could also test for executable files (os.access(case_file, os.X_OK),
    # but it interferes with 01610_client_spawn_editor.editor, which is invoked
    # as a query editor in the test, and must be marked as executable.
    return os.path.isfile(case_file) and any(case_file.endswith(supported_ext) for supported_ext in TEST_FILE_EXTENSIONS)


def removesuffix(text, *suffixes):
    """
    Added in python 3.9
    https://www.python.org/dev/peps/pep-0616/

    This version can work with several possible suffixes
    """
    for suffix in suffixes:
        if suffix and text.endswith(suffix):
            return text[:-len(suffix)]
    return text
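
# For example, removesuffix("00001_test.gen", ".gen", ".sql") returns
# "00001_test"; only the first matching suffix is stripped.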


def render_test_template(j2env, suite_dir, test_name):
    """
    Render the templates for a test and its reference file if needed
    """

    if j2env is None:
        return test_name

    test_base_name = removesuffix(test_name, ".sql.j2", ".sql")

    reference_file_name = test_base_name + ".reference.j2"
    reference_file_path = os.path.join(suite_dir, reference_file_name)
    if os.path.isfile(reference_file_path):
        tpl = j2env.get_template(reference_file_name)
        tpl.stream().dump(os.path.join(suite_dir, test_base_name) + ".gen.reference")

    if test_name.endswith(".sql.j2"):
        tpl = j2env.get_template(test_name)
        generated_test_name = test_base_name + ".gen.sql"
        tpl.stream().dump(os.path.join(suite_dir, generated_test_name))
        return generated_test_name

    return test_name


def get_selected_tests(suite_dir, patterns):
    """
    Find all files with tests, filter them, and render templates
    """

    j2env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(suite_dir),
        keep_trailing_newline=True,
    ) if USE_JINJA else None

    for test_name in os.listdir(suite_dir):
        if not is_test_from_dir(suite_dir, test_name):
            continue
        if patterns and not any(re.search(pattern, test_name) for pattern in patterns):
            continue
        if USE_JINJA and test_name.endswith(".gen.sql"):
            continue
        test_name = render_test_template(j2env, suite_dir, test_name)
        yield test_name


def get_tests_list(suite_dir, patterns, test_runs, sort_key):
    """
    Return the list of test file names to run
    """

    all_tests = list(get_selected_tests(suite_dir, patterns))
    all_tests = all_tests * test_runs
    all_tests.sort(key=sort_key)
    return all_tests


def get_reference_file(suite_dir, name):
    """
    Returns the reference file name for the specified test
    """

    name = removesuffix(name, ".gen")
    for ext in ['.reference', '.gen.reference']:
        reference_file = os.path.join(suite_dir, name) + ext
        if os.path.isfile(reference_file):
            return reference_file
    return None


def main(args):
    global server_died
    global stop_time
    global exit_code
    global server_logs_level

    def is_data_present():
        clickhouse_proc = open_client_process(args.client)
        (stdout, stderr) = clickhouse_proc.communicate(b"EXISTS TABLE test.hits")
        if clickhouse_proc.returncode != 0:
            raise CalledProcessError(clickhouse_proc.returncode, args.client, stderr)

        return stdout.startswith(b'1')

    if not check_server_started(args.client, args.server_check_retries):
        raise Exception(
            "Server is not responding. Cannot execute 'SELECT 1' query. \
            If you are using split build, you have to specify -c option.")

    build_flags = collect_build_flags(args.client)

    if args.replicated_database:
        build_flags.append(BuildFlags.DATABASE_REPLICATED)

    if args.use_skip_list:
        tests_to_skip_from_list = collect_tests_to_skip(args.skip_list_path, build_flags)
    else:
        tests_to_skip_from_list = set()

    if args.skip:
        args.skip = set(args.skip) | tests_to_skip_from_list
    else:
        args.skip = tests_to_skip_from_list

    if args.use_skip_list and not args.sequential:
        args.sequential = collect_sequential_list(args.skip_list_path)

    base_dir = os.path.abspath(args.queries)
    tmp_dir = os.path.abspath(args.tmp)

    # Keep same default values as in queries/shell_config.sh
    os.environ.setdefault("CLICKHOUSE_BINARY", args.binary)
    # os.environ.setdefault("CLICKHOUSE_CLIENT", args.client)
    os.environ.setdefault("CLICKHOUSE_CONFIG", args.configserver)

    if args.configclient:
        os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient)

    # Force to print server warnings in stderr
    # Shell scripts could change logging level
    os.environ.setdefault("CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL", server_logs_level)

    # This code is bad as the time is not monotonic
    if args.global_time_limit:
        stop_time = time() + args.global_time_limit

    if args.zookeeper is None:
        try:
            args.zookeeper = int(extract_key(" --key zookeeper | grep . | wc -l")) > 0
        except ValueError:
            args.zookeeper = False

    if args.shard is None:
        args.shard = bool(extract_key(' --key listen_host | grep -E "127.0.0.2|::"'))

    def create_common_database(args, db_name):
        create_database_retries = 0
        while create_database_retries < MAX_RETRIES:
            client_cmd = args.client + " " + get_additional_client_options(args)

            clickhouse_proc_create = open_client_process(client_cmd, universal_newlines=True)

            (stdout, stderr) = clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + db_name + get_db_engine(args, db_name)))

            if not need_retry(stdout, stderr):
                break
            create_database_retries += 1

    if args.database and args.database != "test":
        create_common_database(args, args.database)

    create_common_database(args, "test")

    total_tests_run = 0

    for suite in sorted(os.listdir(base_dir), key=suite_key_func):
        if server_died.is_set():
            break

        suite_dir = os.path.join(base_dir, suite)
        suite_re_obj = re.search('^[0-9]+_(.*)$', suite)
        if not suite_re_obj:  # skip .gitignore and so on
            continue

        suite_tmp_dir = os.path.join(tmp_dir, suite)
        if not os.path.exists(suite_tmp_dir):
            os.makedirs(suite_tmp_dir)

        suite = suite_re_obj.group(1)

        if os.path.isdir(suite_dir):
            if 'stateful' in suite and not args.no_stateful and not is_data_present():
                print("Won't run stateful tests because test data wasn't loaded.")
                continue
            if 'stateless' in suite and args.no_stateless:
                print("Won't run stateless tests because they were manually disabled.")
                continue
            if 'stateful' in suite and args.no_stateful:
                print("Won't run stateful tests because they were manually disabled.")
                continue

            all_tests = get_tests_list(
                suite_dir, args.test, args.test_runs, tests_in_suite_key_func)

            jobs = args.jobs

            parallel_tests = []
            sequential_tests = []

            for test in all_tests:
                if any(s in test for s in args.sequential):
                    sequential_tests.append(test)
                else:
                    parallel_tests.append(test)

            total_tests_run += do_run_tests(
                jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_tests, sequential_tests, args.parallel)
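
    # When args.hung_check is set: poll the server's processlist for up to a
    # minute so that background queries can finish, then dump stack traces if
    # something is still running.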
|
2019-10-09 10:51:05 +00:00
|
|
|
|
2019-03-13 11:03:57 +00:00
|
|
|
if args.hung_check:
|
2020-05-31 14:57:29 +00:00
|
|
|
|
|
|
|
# Some queries may execute in background for some time after test was finished. This is normal.
|
2021-01-26 20:36:04 +00:00
|
|
|
for _ in range(1, 60):
|
2021-01-25 08:53:04 +00:00
|
|
|
timeout, processlist = get_processlist(args)
|
|
|
|
if timeout or not processlist:
|
2020-05-31 14:57:29 +00:00
|
|
|
break
|
|
|
|
sleep(1)

        if timeout or processlist:
            if processlist:
                print(colored("\nFound hung queries in processlist:", args, "red", attrs=["bold"]))
                print(processlist)
            else:
                print(colored("Seems like the server hung and cannot respond to queries", args, "red", attrs=["bold"]))

            print_stacktraces()
            exit_code.value = 1
        else:
            print(colored("\nNo queries hung.", args, "green", attrs=["bold"]))

    if len(restarted_tests) > 0:
        print("\nSome tests were restarted:\n")

        for (test_case, stderr) in restarted_tests:
            print(test_case + "\n" + stderr + "\n")

    if total_tests_run == 0:
        print("No tests were run.")
        sys.exit(1)
    else:
        print("All tests have finished.")

    sys.exit(exit_code.value)
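

# Look up an executable by name: an explicit path wins, then each PATH entry,
# then /usr/local/bin and /usr/bin as fallbacks.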
def find_binary(name):
    if os.path.exists(name) and os.access(name, os.X_OK):
        return True

    paths = os.environ.get("PATH", "").split(':')
    for path in paths:
        if os.access(os.path.join(path, name), os.X_OK):
            return True

    # maybe it wasn't in PATH
    if os.access(os.path.join('/usr/local/bin', name), os.X_OK):
        return True
    if os.access(os.path.join('/usr/bin', name), os.X_OK):
        return True

    return False


def get_additional_client_options(args):
    if args.client_option:
        return ' '.join('--' + option for option in args.client_option)

    return ''


def get_additional_client_options_url(args):
    if args.client_option:
        return '&'.join(args.client_option)

    return ''
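
# For example, --client-option max_memory_usage=1G send_logs_level=trace becomes
# '--max_memory_usage=1G --send_logs_level=trace' on the client command line and
# 'max_memory_usage=1G&send_logs_level=trace' in the HTTP URL parameters.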


def collect_tests_to_skip(skip_list_path, build_flags):
    result = set()

    if not os.path.exists(skip_list_path):
        return result

    with open(skip_list_path, 'r') as skip_list_file:
        content = skip_list_file.read()

        # allow comments in skip_list.json (it is minified before parsing)
        skip_dict = json.loads(json_minify(content))

        for build_flag in build_flags:
            result |= set(skip_dict[build_flag])

    count = len(result)

    if count > 0:
        print(f"Found skip-list file {skip_list_path}, {count} tests will be skipped")

    return result
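
# skip_list.json maps build flags to lists of test-name substrings; a sketch of
# the format (key names illustrative):
#   {"thread-sanitizer": ["00281"], "parallel": ["00600_replace_running_query"]}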


def collect_sequential_list(skip_list_path):
    if not os.path.exists(skip_list_path):
        return set()

    with open(skip_list_path, 'r') as skip_list_file:
        content = skip_list_file.read()

        # allow comments in skip_list.json (it is minified before parsing)
        skip_dict = json.loads(json_minify(content))

        if 'parallel' in skip_dict:
            return skip_dict['parallel']

    return set()
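
# Note: despite its name, the 'parallel' key of skip_list.json lists the tests
# that must run sequentially (it feeds the --sequential matching in main()).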


if __name__ == '__main__':
    # Move to a new process group and kill it at exit so that we don't leave
    # any runaway test processes behind
    # (a new process group is required to avoid killing some parent processes)
    os.setpgid(0, 0)

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)

    parser = ArgumentParser(description='ClickHouse functional tests')
    parser.add_argument('-q', '--queries', help='Path to queries dir')
    parser.add_argument('--tmp', help='Path to tmp dir')

    parser.add_argument('-b', '--binary', default='clickhouse',
                        help='Path to the clickhouse binary (clickhouse-server for a split build) or name of the binary in PATH')

    parser.add_argument('-c', '--client',
                        help='Path to the clickhouse-client binary (only useful for a split build) or name of the binary in PATH')

    parser.add_argument('--extract_from_config', help='extract-from-config program')
    parser.add_argument('--configclient', help='Client config (if you use non-default ports)')
    parser.add_argument('--configserver', default='/etc/clickhouse-server/config.xml', help='Preprocessed server config')
    parser.add_argument('-o', '--output', help='Output xUnit compliant test report directory')
    parser.add_argument('-t', '--timeout', type=int, default=600, help='Timeout for each test case in seconds')
    parser.add_argument('--global_time_limit', type=int, help='Stop when the total run time exceeds the specified limit (checked after the current test finishes)')
    parser.add_argument('test', nargs='*', help='Optional test case name regex')
    parser.add_argument('-d', '--disabled', action='store_true', default=False, help='Also run disabled tests')
    parser.add_argument('--stop', action='store_true', default=None, dest='stop', help='Stop on network errors')
    parser.add_argument('--order', default='desc', choices=['asc', 'desc', 'random'], help='Run order')
    parser.add_argument('--testname', action='store_true', default=None, dest='testname', help='Send a query with the test name before running each test')
    parser.add_argument('--hung-check', action='store_true', default=False)
    parser.add_argument('--force-color', action='store_true', default=False)
    parser.add_argument('--database', help='Database for tests (random name test_XXXXXX by default)')
    parser.add_argument('--no-drop-if-fail', action='store_true', help='Do not drop the test database if the test has failed')
    parser.add_argument('--hide-db-name', action='store_true', help='Replace the random database name with "default" in stderr')
    parser.add_argument('--parallel', default='1/1', help='One parallel test run, specified as number/total')
    parser.add_argument('-j', '--jobs', default=1, nargs='?', type=int, help='Number of parallel jobs to run tests with')
    parser.add_argument('--test-runs', default=1, nargs='?', type=int, help='Run each test many times (useful e.g. for flaky checks)')
    parser.add_argument('-U', '--unified', default=3, type=int, help='Output NUM lines of unified diff context')
    parser.add_argument('-r', '--server-check-retries', default=30, type=int, help='Number of attempts to execute SELECT 1 before tests start')
    parser.add_argument('--skip-list-path', help="Path to skip-list file")
    parser.add_argument('--use-skip-list', action='store_true', default=False, help="Use skip list to skip tests if found")
    parser.add_argument('--db-engine', help='Database engine name')
    parser.add_argument('--replicated-database', action='store_true', default=False, help='Run tests with Replicated database engine')
    parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
    parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
    parser.add_argument('--skip', nargs='+', help="Skip these tests")
    parser.add_argument('--sequential', nargs='+', help="Run these tests sequentially even if --parallel is specified")
    parser.add_argument('--no-long', action='store_true', dest='no_long', help='Do not run long tests')
    parser.add_argument('--client-option', nargs='+', help='Specify additional client arguments')
    parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time')

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--zookeeper', action='store_true', default=None, dest='zookeeper', help='Run ZooKeeper related tests')
    group.add_argument('--no-zookeeper', action='store_false', default=None, dest='zookeeper', help='Do not run ZooKeeper related tests')

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--shard', action='store_true', default=None, dest='shard', help='Run sharding related tests (requires clickhouse-server to listen on 127.0.0.2 and 127.0.0.3)')
    group.add_argument('--no-shard', action='store_false', default=None, dest='shard', help='Do not run shard related tests')

    args = parser.parse_args()
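
    # Illustrative invocation:
    #   ./clickhouse-test --hung-check -j 4 --order random 00001_select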

    if args.queries and not os.path.isdir(args.queries):
        print(f"Cannot access the specified directory with queries ({args.queries})", file=sys.stderr)
        sys.exit(1)

    # Autodetect the directory with queries if not specified
    if args.queries is None:
        args.queries = 'queries'

    if not os.path.isdir(args.queries):
        # If we're running from the repo
        args.queries = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'queries')

    if not os.path.isdir(args.queries):
        # Next we're going to try some system directories; don't write 'stdout' files into them.
        if args.tmp is None:
            args.tmp = '/tmp/clickhouse-test'

        args.queries = '/usr/local/share/clickhouse-test/queries'

    if not os.path.isdir(args.queries):
        args.queries = '/usr/share/clickhouse-test/queries'

    if not os.path.isdir(args.queries):
        print("Failed to detect path to the queries directory. Please specify it with the '--queries' option.", file=sys.stderr)
        sys.exit(1)

    print("Using queries from '" + args.queries + "' directory")

    if args.skip_list_path is None:
        args.skip_list_path = os.path.join(args.queries, 'skip_list.json')

    if args.sequential is None:
        args.sequential = set()

    if args.tmp is None:
        args.tmp = args.queries
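
    # A monolithic build exposes the client as the 'clickhouse client'
    # subcommand; a split build ships a separate clickhouse-client executable.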
    if args.client is None:
        if find_binary(args.binary + '-client'):
            args.client = args.binary + '-client'
            print("Using " + args.client + " as client program (expecting split build)")
        elif find_binary(args.binary):
            args.client = args.binary + ' client'
            print("Using " + args.client + " as client program (expecting monolithic build)")
        else:
            print("No 'clickhouse' or 'clickhouse-client' client binary found", file=sys.stderr)
            parser.print_help()
            sys.exit(1)

    if args.configclient:
        args.client += ' --config-file=' + args.configclient

    if os.getenv("CLICKHOUSE_HOST"):
        args.client += ' --host=' + os.getenv("CLICKHOUSE_HOST")

    args.tcp_port = int(os.getenv("CLICKHOUSE_PORT_TCP", "9000"))
    args.client += f" --port={args.tcp_port}"

    if os.getenv("CLICKHOUSE_DATABASE"):
        args.client += ' --database=' + os.getenv("CLICKHOUSE_DATABASE")

    if args.client_option:
        # Set options for the client
        if 'CLICKHOUSE_CLIENT_OPT' in os.environ:
            os.environ['CLICKHOUSE_CLIENT_OPT'] += ' '
        else:
            os.environ['CLICKHOUSE_CLIENT_OPT'] = ''

        os.environ['CLICKHOUSE_CLIENT_OPT'] += get_additional_client_options(args)

        # Set options for curl
        if 'CLICKHOUSE_URL_PARAMS' in os.environ:
            os.environ['CLICKHOUSE_URL_PARAMS'] += '&'
        else:
            os.environ['CLICKHOUSE_URL_PARAMS'] = ''

        os.environ['CLICKHOUSE_URL_PARAMS'] += get_additional_client_options_url(args)

    if args.extract_from_config is None:
        if os.access(args.binary + '-extract-from-config', os.X_OK):
            args.extract_from_config = args.binary + '-extract-from-config'
        else:
            args.extract_from_config = args.binary + ' extract-from-config'

    if args.jobs is None:
        args.jobs = multiprocessing.cpu_count()

    main(args)