mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

clickhouse-test - prepare for use other than 'test' database (#4961)

Parent: 3accb16cc6 · Commit: c78176603c
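In short: the test runner stops assuming a hard-coded `test` database. A new `--database` option (surfaced to shell tests as `CLICKHOUSE_DATABASE`) is threaded through a `client_with_database` command string, the target database is created at startup with `CREATE DATABASE IF NOT EXISTS`, a new `--parallel n/m` option lets several runners split one test list, and the stateless tests themselves are tidied (consistent database qualifiers, trailing `DROP TABLE` cleanup) so they can run outside the `test` database. The debian packaging hunks at the end drop the obsolete clickhouse-server-base leftovers.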
@@ -36,6 +36,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
 set (ENABLE_DATA_SQLITE 0 CACHE BOOL "")
 set (ENABLE_DATA_MYSQL 0 CACHE BOOL "")
 set (ENABLE_DATA_POSTGRESQL 0 CACHE BOOL "")
+set (ENABLE_ENCODINGS 0 CACHE BOOL "")
+
 # new after 2.0.0:
 set (POCO_ENABLE_ZIP 0 CACHE BOOL "")
 set (POCO_ENABLE_PAGECOMPILER 0 CACHE BOOL "")
@@ -43,7 +43,7 @@ def remove_control_characters(s):
 
 def run_single_test(args, ext, server_logs_level, case_file, stdout_file, stderr_file):
     if ext == '.sql':
-        command = "{0} --send_logs_level={1} --testmode --multiquery < {2} > {3} 2> {4}".format(args.client, server_logs_level, case_file, stdout_file, stderr_file)
+        command = "{0} --send_logs_level={1} --testmode --multiquery < {2} > {3} 2> {4}".format(args.client_with_database, server_logs_level, case_file, stdout_file, stderr_file)
     else:
         command = "{} > {} 2> {}".format(case_file, stdout_file, stderr_file)
 
@@ -137,6 +137,7 @@ def main(args):
     if args.configclient:
         os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient)
     os.environ.setdefault("CLICKHOUSE_TMP", tmp_dir)
+    os.environ.setdefault("CLICKHOUSE_DATABASE", args.database)
 
     # Force to print server warnings in stderr
     # Shell scripts could change logging level
@@ -165,7 +166,12 @@ def main(args):
     failures_total = 0
 
     clickhouse_proc_create = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS test")
+    clickhouse_proc_create.communicate("CREATE DATABASE IF NOT EXISTS " + args.database)
 
+    def is_test_from_dir(suite_dir, case):
+        case_file = os.path.join(suite_dir, case)
+        (name, ext) = os.path.splitext(case)
+        return os.path.isfile(case_file) and (ext == '.sql' or ext == '.sh' or ext == '.py')
+
    def sute_key_func(item):
        if args.order == 'random':
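The bootstrap above works because clickhouse-client reads its query from stdin, so `communicate()` both sends the statement and waits for the process to exit. A minimal standalone sketch of the same pattern (Python 2 style to match the script; the client string and database name are illustrative, not taken from the commit):

    import shlex
    from subprocess import Popen, PIPE

    client = "clickhouse-client --port=9000"   # illustrative client command
    database = "test_run_1"                    # hypothetical target database

    # The client reads the query from stdin; communicate() writes it,
    # closes stdin and waits for the process to finish.
    proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate("CREATE DATABASE IF NOT EXISTS " + database)
    if proc.returncode != 0:
        raise RuntimeError("database bootstrap failed: " + err)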
@@ -196,7 +202,6 @@ def main(args):
 
             suite = suite_re_obj.group(1)
             if os.path.isdir(suite_dir):
-                print("\nRunning {} tests.\n".format(suite))
 
                 failures = 0
                 failures_chain = 0
@@ -225,166 +230,180 @@ def main(args):
                     except ValueError:
                         return 99997
 
-                for case in sorted(filter(lambda case: re.search(args.test, case) if args.test else True, os.listdir(suite_dir)), key=key_func):
+                run_n, run_total = args.parallel.split('/')
+                run_n = float(run_n)
+                run_total = float(run_total)
+                all_tests = os.listdir(suite_dir)
+                all_tests = filter(lambda case: is_test_from_dir(suite_dir, case), all_tests)
+                all_tests = sorted(filter(lambda case: re.search(args.test, case) if args.test else True, all_tests), key=key_func)
+                tests_n = len(all_tests)
+                start = int(tests_n / run_total * (run_n - 1))
+                if start > 0:
+                    start = start + 1
+                end = int(tests_n / run_total * (run_n))
+                all_tests = all_tests[start : end]
+
+                print("\nRunning {} {} tests.".format(tests_n, suite) + (" {} .. {} ".format(start, end) if run_total > 1 else "") + "\n")
+
+                for case in all_tests:
                     if SERVER_DIED:
                         break
 
                     case_file = os.path.join(suite_dir, case)
                     (name, ext) = os.path.splitext(case)
 
-                    if os.path.isfile(case_file) and (ext == '.sql' or ext == '.sh' or ext == '.py'):
-                        report_testcase = et.Element("testcase", attrib = {"name": name})
+                    report_testcase = et.Element("testcase", attrib = {"name": name})
 
                     try:
                         print "{0:72}".format(name + ": "),
                         sys.stdout.flush()
 
                         if args.skip and any(s in name for s in args.skip):
                             report_testcase.append(et.Element("skipped", attrib = {"message": "skip"}))
                             print(MSG_SKIPPED + " - skip")
                             skipped_total += 1
                         elif not args.zookeeper and 'zookeeper' in name:
                             report_testcase.append(et.Element("skipped", attrib = {"message": "no zookeeper"}))
                             print(MSG_SKIPPED + " - no zookeeper")
                             skipped_total += 1
                         elif not args.shard and 'shard' in name:
                             report_testcase.append(et.Element("skipped", attrib = {"message": "no shard"}))
                             print(MSG_SKIPPED + " - no shard")
                             skipped_total += 1
                         elif not args.no_long and 'long' in name:
                             report_testcase.append(et.Element("skipped", attrib = {"message": "no long"}))
                             print(MSG_SKIPPED + " - no long")
                             skipped_total += 1
                         else:
                             disabled_file = os.path.join(suite_dir, name) + '.disabled'
 
                             if os.path.exists(disabled_file) and not args.disabled:
                                 message = open(disabled_file, 'r').read()
                                 report_testcase.append(et.Element("skipped", attrib = {"message": message}))
                                 print(MSG_SKIPPED + " - " + message)
-
-                            if args.testname:
-                                clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
-                                clickhouse_proc.communicate("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite))
+                            else:
+                                if args.testname:
+                                    clickhouse_proc = Popen(shlex.split(args.client_with_database), stdin=PIPE, stdout=PIPE, stderr=PIPE)
+                                    clickhouse_proc.communicate("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite))
 
                                 reference_file = os.path.join(suite_dir, name) + '.reference'
                                 stdout_file = os.path.join(suite_tmp_dir, name) + '.stdout'
                                 stderr_file = os.path.join(suite_tmp_dir, name) + '.stderr'
 
                                 proc, stdout, stderr = run_single_test(args, ext, server_logs_level, case_file, stdout_file, stderr_file)
                                 if proc.returncode is None:
                                     try:
                                         proc.kill()
                                     except OSError as e:
                                         if e.errno != ESRCH:
                                             raise
 
                                     failure = et.Element("failure", attrib = {"message": "Timeout"})
                                     report_testcase.append(failure)
 
                                     failures += 1
                                     print("{0} - Timeout!".format(MSG_FAIL))
                                 else:
                                     counter = 1
                                     while proc.returncode != 0 and need_retry(stderr):
                                         proc, stdout, stderr = run_single_test(args, ext, server_logs_level, case_file, stdout_file, stderr_file)
                                         sleep(2**counter)
                                         counter += 1
                                         if counter > 6:
                                             break
 
                                     if proc.returncode != 0:
                                         failure = et.Element("failure", attrib = {"message": "return code {}".format(proc.returncode)})
                                         report_testcase.append(failure)
 
                                         stdout_element = et.Element("system-out")
                                         stdout_element.text = et.CDATA(stdout)
                                         report_testcase.append(stdout_element)
 
                                         failures += 1
                                         failures_chain += 1
                                         print("{0} - return code {1}".format(MSG_FAIL, proc.returncode))
 
                                         if stderr:
                                             stderr_element = et.Element("system-err")
                                             stderr_element.text = et.CDATA(stderr)
                                             report_testcase.append(stderr_element)
                                             print(stderr.encode('utf-8'))
 
                                         if args.stop and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) and not 'Received exception from server' in stderr:
                                             SERVER_DIED = True
 
                                     elif stderr:
                                         failure = et.Element("failure", attrib = {"message": "having stderror"})
                                         report_testcase.append(failure)
 
                                         stderr_element = et.Element("system-err")
                                         stderr_element.text = et.CDATA(stderr)
                                         report_testcase.append(stderr_element)
 
                                         failures += 1
                                         failures_chain += 1
                                         print("{0} - having stderror:\n{1}".format(MSG_FAIL, stderr.encode('utf-8')))
 
                                     elif 'Exception' in stdout:
                                         failure = et.Element("error", attrib = {"message": "having exception"})
                                         report_testcase.append(failure)
 
                                         stdout_element = et.Element("system-out")
                                         stdout_element.text = et.CDATA(stdout)
                                         report_testcase.append(stdout_element)
 
                                         failures += 1
                                         failures_chain += 1
                                         print("{0} - having exception:\n{1}".format(MSG_FAIL, stdout.encode('utf-8')))
 
                                     elif not os.path.isfile(reference_file):
                                         skipped = et.Element("skipped", attrib = {"message": "no reference file"})
                                         report_testcase.append(skipped)
                                         print("{0} - no reference file".format(MSG_UNKNOWN))
 
                                     else:
                                         result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout = PIPE)
 
                                         if result_is_different:
                                             diff = Popen(['diff', '--unified', reference_file, stdout_file], stdout = PIPE).communicate()[0]
                                             diff = unicode(diff, errors='replace', encoding='utf-8')
                                             cat = Popen(['cat', '-vet'], stdin=PIPE, stdout=PIPE).communicate(input=diff.encode(encoding='utf-8', errors='replace'))[0]
 
                                             failure = et.Element("failure", attrib = {"message": "result differs with reference"})
                                             report_testcase.append(failure)
 
                                             stdout_element = et.Element("system-out")
-                                            stdout_element.text = et.CDATA(stdout)
+                                            try:
+                                                stdout_element.text = et.CDATA(diff)
+                                            except:
+                                                stdout_element.text = et.CDATA(remove_control_characters(diff))
 
                                             report_testcase.append(stdout_element)
 
                                             failures += 1
                                             print("{0} - result differs with reference:\n{1}".format(MSG_FAIL, cat.encode('utf-8')))
                                         else:
                                             passed_total += 1
                                             failures_chain = 0
                                             print(MSG_OK)
                                             if os.path.exists(stdout_file):
                                                 os.remove(stdout_file)
                                             if os.path.exists(stderr_file):
                                                 os.remove(stderr_file)
                     except KeyboardInterrupt as e:
                         print(colored("Break tests execution", "red"))
                         raise e
                     except:
                         import traceback
                         exc_type, exc_value, tb = sys.exc_info()
                         error = et.Element("error", attrib = {"type": exc_type.__name__, "message": str(exc_value)})
                         report_testcase.append(error)
 
                         failures += 1
                         print("{0} - Test internal error: {1}\n{2}\n{3}".format(MSG_FAIL, exc_type.__name__, exc_value, "\n".join(traceback.format_tb(tb, 10))))
                     finally:
                         dump_report(args.output, suite, name, report_testcase)
 
                     if failures_chain >= 20:
                         break
 
                 failures_total = failures_total + failures
 
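The `--parallel` arithmetic above is easiest to check with concrete numbers. A standalone sketch using the same formulas (test names are made up):

    def parallel_slice(all_tests, parallel="1/1"):
        # Same arithmetic as the runner: runner run_n of run_total takes
        # roughly tests_n/run_total items, skipping one past the previous
        # runner's end so the slices do not overlap.
        run_n, run_total = map(float, parallel.split('/'))
        tests_n = len(all_tests)
        start = int(tests_n / run_total * (run_n - 1))
        if start > 0:
            start = start + 1
        end = int(tests_n / run_total * run_n)
        return all_tests[start:end]

    tests = ["%05d_dummy" % i for i in range(10)]
    for n in (1, 2, 3):
        print(parallel_slice(tests, "%d/3" % n))
    # Runner 1 gets indices 0-2, runner 2 gets 4-5, runner 3 gets 7-9;
    # with these example numbers the boundary tests 3 and 6 land in no
    # slice, a side effect of the simple "start + 1" overlap guard.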
@@ -396,7 +415,7 @@ def main(args):
     print(colored("\n{passed_total} tests passed. {skipped_total} tests skipped.".format(passed_total = passed_total, skipped_total = skipped_total), "green", attrs=["bold"]))
 
     if args.hung_check:
-        processlist = get_processlist(args.client)
+        processlist = get_processlist(args.client_with_database)
         if processlist:
             server_pid = get_server_pid(os.getenv("CLICKHOUSE_PORT_TCP", '9000'))
             print(colored("\nFound hung queries in processlist:", "red", attrs=["bold"]))
@@ -440,6 +459,8 @@ if __name__ == '__main__':
     parser.add_argument('--testname', action='store_true', default=None, dest='testname', help='Make query with test name before test run')
     parser.add_argument('--hung-check', action='store_true', default=False)
     parser.add_argument('--force-color', action='store_true', default=False)
+    parser.add_argument('--database', default='test', help='Default database for tests')
+    parser.add_argument('--parallel', default='1/1', help='Parralel test run number/total')
 
     parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
     parser.add_argument('--skip', nargs='+', help="Skip these tests")
@@ -480,6 +501,12 @@ if __name__ == '__main__':
         args.client += ' --host=' + os.getenv("CLICKHOUSE_HOST")
     if os.getenv("CLICKHOUSE_PORT_TCP"):
         args.client += ' --port=' + os.getenv("CLICKHOUSE_PORT_TCP")
+    if os.getenv("CLICKHOUSE_DATABASE"):
+        args.client += ' --database=' + os.getenv("CLICKHOUSE_DATABASE")
+
+    args.client_with_database = args.client
+    if args.database:
+        args.client_with_database += ' --database=' + args.database
 
     if args.extract_from_config is None:
         if os.access(args.binary + '-extract-from-config', os.X_OK):
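Taken together, a sharded run might be launched as `./clickhouse-test --database=test_1 --parallel=1/4` for the first of four runners (an illustrative invocation, not one from the commit); the next hunk shows the in-tree parallel loop instead keeping one database and separating runners by `--tmp` directory.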
@@ -137,7 +137,7 @@ else
     # Running test in parallel will fail some results (tests can create/fill/drop same tables)
     TEST_NPROC=${TEST_NPROC:=$(( `nproc || sysctl -n hw.ncpu || echo 2` * 2))}
     for i in `seq 1 ${TEST_NPROC}`; do
-        $CLICKHOUSE_TEST --order=random --testname &
+        $CLICKHOUSE_TEST --order=random --testname --tmp=$DATA_DIR/tmp/tmp${i} &
     done
     $CLICKHOUSE_PERFORMANCE_TEST &
 fi
@@ -14,9 +14,9 @@ Don't use Docker from your system repository.
 
 * [pip](https://pypi.python.org/pypi/pip). To install: `sudo apt-get install python-pip`
 * [py.test](https://docs.pytest.org/) testing framework. To install: `sudo -H pip install pytest`
-* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml kazoo PyMySQL psycopg2 pymongo tzlocal`
+* [docker-compose](https://docs.docker.com/compose/) and additional python libraries. To install: `sudo -H pip install docker-compose docker dicttoxml kazoo PyMySQL psycopg2 pymongo tzlocal kafka-python`
 
-(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2`
+(highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python-pytest python-dicttoxml python-docker python-pymysql python-pymongo python-tzlocal python-kazoo python-psycopg2 python-kafka`
 
 If you want to run the tests under a non-privileged user, you must add this user to `docker` group: `sudo usermod -aG docker $USER` and re-login.
 (You must close all your sessions (for example, restart your computer))
@@ -384,6 +384,14 @@ def test_create_as_select(started_cluster):
     ddl_check_query(instance, "DROP TABLE IF EXISTS test_as_select ON CLUSTER cluster")
 
 
+def test_create_reserved(started_cluster):
+    instance = cluster.instances['ch2']
+    ddl_check_query(instance, "CREATE TABLE test_reserved ON CLUSTER cluster (`p` Date, `image` Nullable(String), `index` Nullable(Float64), `invalidate` Nullable(Int64)) ENGINE = MergeTree(`p`, `p`, 8192)")
+    ddl_check_query(instance, "CREATE TABLE test_as_reserved ON CLUSTER cluster ENGINE = Memory AS (SELECT * from test_reserved)")
+    ddl_check_query(instance, "DROP TABLE IF EXISTS test_reserved ON CLUSTER cluster")
+    ddl_check_query(instance, "DROP TABLE IF EXISTS test_as_reserved ON CLUSTER cluster")
+
+
 if __name__ == '__main__':
     with contextmanager(started_cluster)() as cluster:
         for name, instance in cluster.instances.items():
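The new `test_create_reserved` case exercises backquoted column names that collide with common keywords (`image`, `index`, `invalidate`) through distributed DDL, including `CREATE ... AS SELECT` from such a table, and cleans up after itself.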
@@ -6,7 +6,7 @@ CREATE TABLE default.test_table (EventDate Date, CounterID UInt32, UserID UInt6
 CREATE MATERIALIZED VIEW default.test_view (Rows UInt64, MaxHitTime DateTime) ENGINE = Memory AS SELECT count() AS Rows, max(UTCEventTime) AS MaxHitTime FROM default.test_table;
 CREATE MATERIALIZED VIEW default.test_view_filtered (EventDate Date, CounterID UInt32) ENGINE = Memory POPULATE AS SELECT CounterID, EventDate FROM default.test_table WHERE EventDate < '2013-01-01';
 
-INSERT INTO test_table (EventDate, UTCEventTime) VALUES ('2014-01-02', '2014-01-02 03:04:06');
+INSERT INTO default.test_table (EventDate, UTCEventTime) VALUES ('2014-01-02', '2014-01-02 03:04:06');
 
 SELECT * FROM default.test_table;
 SELECT * FROM default.test_view;
@@ -1,5 +1,5 @@
-DROP TABLE IF EXISTS numbers_memory;
-CREATE TABLE numbers_memory AS system.numbers ENGINE = Memory;
-INSERT INTO numbers_memory SELECT number FROM system.numbers LIMIT 100;
-SELECT DISTINCT number FROM remote('127.0.0.{2,3}', default.numbers_memory) ORDER BY number LIMIT 10;
-DROP TABLE numbers_memory;
+DROP TABLE IF EXISTS test.numbers_memory;
+CREATE TABLE test.numbers_memory AS system.numbers ENGINE = Memory;
+INSERT INTO test.numbers_memory SELECT number FROM system.numbers LIMIT 100;
+SELECT DISTINCT number FROM remote('127.0.0.{2,3}', test.numbers_memory) ORDER BY number LIMIT 10;
+DROP TABLE test.numbers_memory;
@@ -65,7 +65,7 @@ DROP TABLE IF EXISTS test.merge_one_two;
 
 CREATE TABLE test.one (x Int32) ENGINE = Memory;
 CREATE TABLE test.two (x UInt64) ENGINE = Memory;
-CREATE TABLE test.merge_one_two (x UInt64) ENGINE = Merge(test, '^one|two$');
+CREATE TABLE test.merge_one_two (x UInt64) ENGINE = Merge(test, '^one$|^two$');
 
 INSERT INTO test.one VALUES (1);
 INSERT INTO test.two VALUES (1);
@@ -86,7 +86,7 @@ DROP TABLE IF EXISTS test.merge_one_two;
 
 CREATE TABLE test.one (x String) ENGINE = Memory;
 CREATE TABLE test.two (x FixedString(16)) ENGINE = Memory;
-CREATE TABLE test.merge_one_two (x String) ENGINE = Merge(test, '^one|two$');
+CREATE TABLE test.merge_one_two (x String) ENGINE = Merge(test, '^one$|^two$');
 
 INSERT INTO test.one VALUES ('1');
 INSERT INTO test.two VALUES ('1');
@@ -102,7 +102,7 @@ DROP TABLE IF EXISTS test.merge_one_two;
 
 CREATE TABLE test.one (x DateTime) ENGINE = Memory;
 CREATE TABLE test.two (x UInt64) ENGINE = Memory;
-CREATE TABLE test.merge_one_two (x UInt64) ENGINE = Merge(test, '^one|two$');
+CREATE TABLE test.merge_one_two (x UInt64) ENGINE = Merge(test, '^one$|^two$');
 
 INSERT INTO test.one VALUES (1);
 INSERT INTO test.two VALUES (1);
@@ -118,7 +118,7 @@ DROP TABLE IF EXISTS test.merge_one_two;
 
 CREATE TABLE test.one (x Array(UInt32), z String DEFAULT '', y Array(UInt32)) ENGINE = Memory;
 CREATE TABLE test.two (x Array(UInt64), z String DEFAULT '', y Array(UInt64)) ENGINE = Memory;
-CREATE TABLE test.merge_one_two (x Array(UInt64), z String, y Array(UInt64)) ENGINE = Merge(test, '^one|two$');
+CREATE TABLE test.merge_one_two (x Array(UInt64), z String, y Array(UInt64)) ENGINE = Merge(test, '^one$|^two$');
 
 INSERT INTO test.one (x, y) VALUES ([1], [0]);
 INSERT INTO test.two (x, y) VALUES ([1], [0]);
@@ -10,3 +10,4 @@ insert into test.table select today() as date, [number], [number + 1], toFixedSt
 set preferred_max_column_in_block_size_bytes = 112;
 select blockSize(), * from test.table prewhere x = 7 format Null;
 
+drop table if exists test.table;
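This trailing `drop table if exists` (repeated in the next several test files) matters once the database outlives a single run: each execution starts from a clean slate rather than relying on the `test` database being disposable.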
@@ -3,3 +3,4 @@ create table one_table (date Date, one UInt64) engine = MergeTree(date, (date, o
 insert into one_table select today(), toUInt64(1) from system.numbers limit 100000;
 SET preferred_block_size_bytes = 8192;
 select isNull(one) from one_table where isNull(one);
+drop table if exists one_table;
@@ -366,3 +366,5 @@ select 'arrayPopFront(arr1)';
 select arr1, arrayPopFront(arr1) from test.array_functions;
 select 'arrayPopBack(arr1)';
 select arr1, arrayPopBack(arr1) from test.array_functions;
+
+DROP TABLE if exists test.array_functions;
@@ -14,4 +14,4 @@ desc table (select 1);
 select '-';
 desc (select * from system.numbers);
 select '-';
+drop table if exists test.tab;
@@ -10,3 +10,4 @@ insert into test.table select number, number / 8192 from system.numbers limit 10
 alter table test.table add column def UInt64;
 select * from test.table prewhere val > 2 format Null;
 
+drop table if exists test.table;
@@ -1,15 +1,19 @@
-drop table if exists test_in_tuple_1;
-drop table if exists test_in_tuple_2;
-drop table if exists test_in_tuple;
+drop table if exists test.test_in_tuple_1;
+drop table if exists test.test_in_tuple_2;
+drop table if exists test.test_in_tuple;
 
-create table test_in_tuple_1 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2);
-create table test_in_tuple_2 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2);
-create table test_in_tuple as test_in_tuple_1 engine = Merge('default', '^test_in_tuple_[0-9]+$');
+create table test.test_in_tuple_1 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2);
+create table test.test_in_tuple_2 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2);
+create table test.test_in_tuple as test.test_in_tuple_1 engine = Merge('test', '^test_in_tuple_[0-9]+$');
 
-insert into test_in_tuple_1 values (1, 1, [1, 2], [1, 2]);
-insert into test_in_tuple_2 values (2, 1, [1, 2], [1, 2]);
-select key, arr_x, arr_y, _table from test_in_tuple left array join x as arr_x, y as arr_y order by _table;
+insert into test.test_in_tuple_1 values (1, 1, [1, 2], [1, 2]);
+insert into test.test_in_tuple_2 values (2, 1, [1, 2], [1, 2]);
+select key, arr_x, arr_y, _table from test.test_in_tuple left array join x as arr_x, y as arr_y order by _table;
 select '-';
-select key, arr_x, arr_y, _table from test_in_tuple left array join x as arr_x, y as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table;
+select key, arr_x, arr_y, _table from test.test_in_tuple left array join x as arr_x, y as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table;
 select '-';
-select key, arr_x, arr_y, _table from test_in_tuple left array join arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), x, x ,y) as arr_x, arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), y, x ,y) as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table;
+select key, arr_x, arr_y, _table from test.test_in_tuple left array join arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), x, x ,y) as arr_x, arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), y, x ,y) as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table;
+
+drop table if exists test.test_in_tuple_1;
+drop table if exists test.test_in_tuple_2;
+drop table if exists test.test_in_tuple;
@@ -18,3 +18,4 @@ select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from test.tab;
 select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from test.tab;
 select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from test.tab;
 
+drop table test.tab;
@@ -6,24 +6,24 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
 
 
-$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test.cannot_kill_query"
-$CLICKHOUSE_CLIENT -q "CREATE TABLE test.cannot_kill_query (x UInt64) ENGINE = MergeTree ORDER BY x" &> /dev/null
-$CLICKHOUSE_CLIENT -q "INSERT INTO test.cannot_kill_query SELECT * FROM numbers(10000000)" &> /dev/null
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS cannot_kill_query"
+$CLICKHOUSE_CLIENT -q "CREATE TABLE cannot_kill_query (x UInt64) ENGINE = MergeTree ORDER BY x" &> /dev/null
+$CLICKHOUSE_CLIENT -q "INSERT INTO cannot_kill_query SELECT * FROM numbers(10000000)" &> /dev/null
 
 # This SELECT query will run for a long time. It's used as bloker for ALTER query. It will be killed with SYNC kill.
-query_for_pending="SELECT count() FROM test.cannot_kill_query WHERE NOT ignore(sleep(1)) SETTINGS max_threads=1, max_block_size=1"
+query_for_pending="SELECT count() FROM cannot_kill_query WHERE NOT ignore(sleep(1)) SETTINGS max_threads=1, max_block_size=1"
 $CLICKHOUSE_CLIENT -q "$query_for_pending" &>/dev/null &
 
 sleep 1 # queries should be in strict order
 
 # This ALTER query will wait until $query_for_pending finished. Also it will block $query_to_kill.
-$CLICKHOUSE_CLIENT -q "ALTER TABLE test.cannot_kill_query MODIFY COLUMN x UInt64" &>/dev/null &
+$CLICKHOUSE_CLIENT -q "ALTER TABLE cannot_kill_query MODIFY COLUMN x UInt64" &>/dev/null &
 
 sleep 1
 
 # This SELECT query will also run for a long time. Also it's blocked by ALTER query. It will be killed with ASYNC kill.
 # This is main idea which we check -- blocked queries can be killed with ASYNC kill.
-query_to_kill="SELECT sum(1) FROM test.cannot_kill_query WHERE NOT ignore(sleep(1)) SETTINGS max_threads=1"
+query_to_kill="SELECT sum(1) FROM cannot_kill_query WHERE NOT ignore(sleep(1)) SETTINGS max_threads=1"
 $CLICKHOUSE_CLIENT -q "$query_to_kill" &>/dev/null &
 
 sleep 1 # just to be sure that kill of $query_to_kill will be executed after $query_to_kill.
@@ -48,4 +48,4 @@ do
     fi
 done
 
-$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test.cannot_kill_query" &>/dev/null
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS cannot_kill_query" &>/dev/null
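Removing the hard-coded `test.` qualifier lets the script run against whatever database `$CLICKHOUSE_CLIENT` is configured for, which `shell_config.sh` now controls through `CLICKHOUSE_DATABASE` (see the shell_config.sh hunk below).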
@@ -5,8 +5,6 @@ set -e
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
 
-$CLICKHOUSE_CLIENT -q "USE test;"
-
 for typename in "UInt32" "UInt64" "Float64" "Float32"
 do
     $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS A;"
@@ -19,4 +17,7 @@ do
     $CLICKHOUSE_CLIENT -q "INSERT INTO B(k,t,b) VALUES (2,3,3);"
 
     $CLICKHOUSE_CLIENT -q "SELECT k, t, a, b FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (k,t);"
+
+    $CLICKHOUSE_CLIENT -q "DROP TABLE A;"
+    $CLICKHOUSE_CLIENT -q "DROP TABLE B;"
 done
@@ -4,13 +4,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 . $CURDIR/../shell_config.sh
 
 
-$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS test.small_table"
+$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"
 
-$CLICKHOUSE_CLIENT --query="CREATE TABLE test.small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a);"
+$CLICKHOUSE_CLIENT --query="CREATE TABLE small_table (a UInt64 default 0, n UInt64) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY (a);"
 
-$CLICKHOUSE_CLIENT --query="INSERT INTO test.small_table(n) SELECT * from system.numbers limit 100000;"
+$CLICKHOUSE_CLIENT --query="INSERT INTO small_table(n) SELECT * from system.numbers limit 100000;"
 
-cached_query="SELECT count() FROM test.small_table where n > 0;"
+cached_query="SELECT count() FROM small_table where n > 0;"
 
 $CLICKHOUSE_CLIENT --use_uncompressed_cache=1 --query="$cached_query" &> /dev/null
 
@@ -21,5 +21,5 @@ $CLICKHOUSE_CLIENT --query="SYSTEM FLUSH LOGS"
 
 $CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) ORDER BY event_time DESC LIMIT 1"
 
-$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS test.small_table"
+$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table"
 
@@ -21,12 +21,12 @@ RENAME TABLE test.hits10 TO test.hits;
 
 SELECT count() FROM test.hits WHERE CounterID = 732797;
 
-RENAME TABLE test.hits TO hits, test.visits TO test.hits;
+RENAME TABLE test.hits TO default.hits, test.visits TO test.hits;
 
 SELECT sum(Sign) FROM test.hits WHERE CounterID = 912887;
-SELECT count() FROM hits WHERE CounterID = 732797;
+SELECT count() FROM default.hits WHERE CounterID = 732797;
 
-RENAME TABLE test.hits TO test.visits, hits TO test.hits;
+RENAME TABLE test.hits TO test.visits, default.hits TO test.hits;
 
 SELECT count() FROM test.hits WHERE CounterID = 732797;
 SELECT sum(Sign) FROM test.visits WHERE CounterID = 912887;
@@ -1,9 +1,10 @@
+export CLICKHOUSE_DATABASE=${CLICKHOUSE_DATABASE:="test"}
 export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL:="warning"}
 [ -n "$CLICKHOUSE_CONFIG_CLIENT" ] && CLICKHOUSE_CLIENT_OPT0+=" --config-file=${CLICKHOUSE_CONFIG_CLIENT} "
 [ -n "${CLICKHOUSE_HOST}" ] && CLICKHOUSE_CLIENT_OPT0+=" --host=${CLICKHOUSE_HOST} "
 [ -n "${CLICKHOUSE_PORT_TCP}" ] && CLICKHOUSE_CLIENT_OPT0+=" --port=${CLICKHOUSE_PORT_TCP} "
 [ -n "${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}" ] && CLICKHOUSE_CLIENT_OPT0+=" --send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL} "
+[ -n "${CLICKHOUSE_DATABASE}" ] && CLICKHOUSE_CLIENT_OPT0+=" --database=${CLICKHOUSE_DATABASE} "
 
 export CLICKHOUSE_BINARY=${CLICKHOUSE_BINARY:="clickhouse"}
 [ -x "$CLICKHOUSE_BINARY-client" ] && CLICKHOUSE_CLIENT_BINARY=${CLICKHOUSE_CLIENT_BINARY:=$CLICKHOUSE_BINARY-client}
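Note that the `${CLICKHOUSE_DATABASE:="test"}` form assigns the default only when the variable is unset, so CI wrappers can export a different database per runner while plain invocations keep the historical `test`; the `--database` flag is then appended to the client options only when the variable is non-empty.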
debian/clickhouse-server-base.install (vendored, 12 changes)
@@ -1,12 +0,0 @@
-usr/bin/clickhouse
-usr/bin/clickhouse-server
-usr/bin/clickhouse-clang
-usr/bin/clickhouse-lld
-usr/bin/clickhouse-copier
-usr/bin/clickhouse-odbc-bridge
-usr/bin/clickhouse-report
-etc/systemd/system/clickhouse-server.service
-etc/init.d/clickhouse-server
-etc/cron.d/clickhouse-server
-usr/share/clickhouse/*
-etc/security/limits.d/clickhouse.conf
debian/control (vendored, 2 changes)
@@ -15,7 +15,7 @@ Standards-Version: 3.9.8
 
 Package: clickhouse-client
 Architecture: all
-Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}) | clickhouse-server-base (= ${binary:Version})
+Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version})
 Replaces: clickhouse-compressor
 Conflicts: clickhouse-compressor
 Description: Client binary for ClickHouse
|
4
debian/pbuilder-hooks/B90test-server
vendored
4
debian/pbuilder-hooks/B90test-server
vendored
@ -17,6 +17,10 @@ if [ "${PACKAGE_INSTALL}" ]; then
|
|||||||
apt install -y -f --allow-downgrades ||:
|
apt install -y -f --allow-downgrades ||:
|
||||||
dpkg -l | grep clickhouse ||:
|
dpkg -l | grep clickhouse ||:
|
||||||
|
|
||||||
|
# Second install to replace debian versions
|
||||||
|
dpkg --auto-deconfigure -i /tmp/buildd/*.deb ||:
|
||||||
|
dpkg -l | grep clickhouse ||:
|
||||||
|
|
||||||
# Some test references uses specific timezone
|
# Some test references uses specific timezone
|
||||||
ln -fs /usr/share/zoneinfo/Europe/Moscow /etc/localtime
|
ln -fs /usr/share/zoneinfo/Europe/Moscow /etc/localtime
|
||||||
echo 'Europe/Moscow' > /etc/timezone
|
echo 'Europe/Moscow' > /etc/timezone
|
||||||
|
debian/rules (vendored, 5 changes)
@@ -122,11 +122,6 @@ override_dh_install:
 	touch $(DESTDIR)/etc/clickhouse-server/metrika/config.xml
 	touch $(DESTDIR)/etc/clickhouse-server/metrika/users.xml
 
-	# todo: remove after removing clickhouse-server-base package:
-	#mkdir -p $(DESTDIR)/etc/init.d $(DESTDIR)/etc/cron.d
-	#cp debian/clickhouse-server.init $(DESTDIR)/etc/init.d/clickhouse-server
-	#cp debian/clickhouse-server.cron.d $(DESTDIR)/etc/cron.d/clickhouse-server
-
 	dh_install --list-missing --sourcedir=$(DESTDIR)
 
 override_dh_auto_install: