Merge pull request #28909 from vitlibar/convert-skip-list-into-first-line-comments
Convert skip_list.json into first-line comments
Commit 512786346c
@@ -80,7 +80,7 @@ LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAM
 LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
 LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-client --query "SHOW TABLES FROM test"

-LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test -j 8 --testname --shard --zookeeper --print-time --use-skip-list 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_result.txt
+LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test -j 8 --testname --shard --zookeeper --print-time 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_result.txt

 readarray -t FAILED_TESTS < <(awk '/FAIL|TIMEOUT|ERROR/ { print substr($3, 1, length($3)-1) }' "/test_result.txt")

@@ -97,7 +97,7 @@ then
     echo "Going to run again: ${FAILED_TESTS[*]}"

-    LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test --order=random --testname --shard --zookeeper --use-skip-list "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_result.txt
+    LLVM_PROFILE_FILE='client_coverage_%5m.profraw' clickhouse-test --order=random --testname --shard --zookeeper "${FAILED_TESTS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a /test_result.txt
 else
     echo "No failed tests"
 fi
@@ -262,153 +262,8 @@ function run_tests

    start_server

-    TESTS_TO_SKIP=(
-        00105_shard_collations
-        00109_shard_totals_after_having
-        00110_external_sort
-        00302_http_compression
-        00417_kill_query
-        00436_convert_charset
-        00490_special_line_separators_and_characters_outside_of_bmp
-        00652_replicated_mutations_zookeeper
-        00682_empty_parts_merge
-        00701_rollup
-        00834_cancel_http_readonly_queries_on_client_close
-        00911_tautological_compare
-
-        # Hyperscan
-        00926_multimatch
-        00929_multi_match_edit_distance
-        01681_hyperscan_debug_assertion
-        02004_max_hyperscan_regex_length
-
-        01176_mysql_client_interactive # requires mysql client
-        01031_mutations_interpreter_and_context
-        01053_ssd_dictionary # this test mistakenly requires acces to /var/lib/clickhouse -- can't run this locally, disabled
-        01083_expressions_in_engine_arguments
-        01092_memory_profiler
-        01098_msgpack_format
-        01098_temporary_and_external_tables
-        01103_check_cpu_instructions_at_startup # avoid dependency on qemu -- invonvenient when running locally
-        01193_metadata_loading
-        01238_http_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
-        01251_dict_is_in_infinite_loop
-        01259_dictionary_custom_settings_ddl
-        01268_dictionary_direct_layout
-        01280_ssd_complex_key_dictionary
-        01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
-        01318_encrypt # Depends on OpenSSL
-        01318_decrypt # Depends on OpenSSL
-        01663_aes_msan # Depends on OpenSSL
-        01667_aes_args_check # Depends on OpenSSL
-        01683_codec_encrypted # Depends on OpenSSL
-        01776_decrypt_aead_size_check # Depends on OpenSSL
-        01811_filter_by_null # Depends on OpenSSL
-        02012_sha512_fixedstring # Depends on OpenSSL
-        01281_unsucceeded_insert_select_queries_counter
-        01292_create_user
-        01294_lazy_database_concurrent
-        01305_replica_create_drop_zookeeper
-        01354_order_by_tuple_collate_const
-        01355_ilike
-        01411_bayesian_ab_testing
-        01798_uniq_theta_sketch
-        01799_long_uniq_theta_sketch
-        01890_stem # depends on libstemmer_c
-        02003_compress_bz2 # depends on bzip2
-        01059_storage_file_compression # depends on brotli and bzip2
-        collate
-        collation
-        _orc_
-        arrow
-        avro
-        base64
-        brotli
-        capnproto
-        client
-        ddl_dictionaries
-        h3
-        hashing
-        hdfs
-        java_hash
-        json
-        limit_memory
-        live_view
-        memory_leak
-        memory_limit
-        mysql
-        odbc
-        parallel_alter
-        parquet
-        protobuf
-        secure
-        sha256
-        xz
-
-        # Not sure why these two fail even in sequential mode. Disabled for now
-        # to make some progress.
-        00646_url_engine
-        00974_query_profiler
-
-        # In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default
-        01504_rocksdb
-        01686_rocksdb
-
-        # Look at DistributedFilesToInsert, so cannot run in parallel.
-        01460_DistributedFilesToInsert
-
-        01541_max_memory_usage_for_user_long
-
-        # Require python libraries like scipy, pandas and numpy
-        01322_ttest_scipy
-        01561_mann_whitney_scipy
-
-        01545_system_errors
-        # Checks system.errors
-        01563_distributed_query_finish
-
-        # nc - command not found
-        01601_proxy_protocol
-        01622_defaults_for_url_engine
-
-        # JSON functions
-        01666_blns
-
-        # Requires postgresql-client
-        01802_test_postgresql_protocol_with_row_policy
-
-        # Depends on AWS
-        01801_s3_cluster
-        02012_settings_clause_for_s3
-
-        # needs psql
-        01889_postgresql_protocol_null_fields
-
-        # needs pv
-        01923_network_receive_time_metric_insert
-
-        01889_sqlite_read_write
-
-        # needs s2
-        01849_geoToS2
-        01851_s2_to_geo
-        01852_s2_get_neighbours
-        01853_s2_cells_intersect
-        01854_s2_cap_contains
-        01854_s2_cap_union
-
-        # needs s3
-        01944_insert_partition_by
-
-        # depends on Go
-        02013_zlib_read_after_eof
-
-        # Accesses CH via mysql table function (which is unavailable)
-        01747_system_session_log_long
-    )
-
-    time clickhouse-test --hung-check -j 8 --order=random --use-skip-list \
-        --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" \
+    time clickhouse-test --hung-check -j 8 --order=random \
+        --fast-tests-only --no-long --testname --shard --zookeeper \
         -- "$FASTTEST_FOCUS" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee "$FASTTEST_OUTPUT/test_log.txt"
@@ -108,7 +108,7 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('--replicated-database')
     fi

-    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --use-skip-list --print-time "${ADDITIONAL_OPTIONS[@]}" \
+    clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
         "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
 }
@@ -97,7 +97,7 @@ function run_tests()
     fi

     clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
-        --use-skip-list --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
 }
@@ -13,8 +13,4 @@ dpkg -i package_folder/clickhouse-test_*.deb

 service clickhouse-server start && sleep 5

-if grep -q -- "--use-skip-list" /usr/bin/clickhouse-test; then
-    SKIP_LIST_OPT="--use-skip-list"
-fi
-
-clickhouse-test --testname --shard --zookeeper "$SKIP_LIST_OPT" "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
+clickhouse-test --testname --shard --zookeeper "$ADDITIONAL_OPTIONS" "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
@@ -10,14 +10,6 @@ import logging
 import time


-def get_skip_list_cmd(path):
-    with open(path, 'r') as f:
-        for line in f:
-            if '--use-skip-list' in line:
-                return '--use-skip-list'
-    return ''
-
-
 def get_options(i):
     options = []
     client_options = []

@@ -56,8 +48,6 @@ def get_options(i):

 def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_time_limit):
-    skip_list_opt = get_skip_list_cmd(cmd)
-
     global_time_limit_option = ''
     if global_time_limit:
         global_time_limit_option = "--global_time_limit={}".format(global_time_limit)

@@ -66,7 +56,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t
     pipes = []
     for i in range(0, len(output_paths)):
         f = open(output_paths[i], 'w')
-        full_command = "{} {} {} {} {}".format(cmd, skip_list_opt, get_options(i), global_time_limit_option, skip_tests_option)
+        full_command = "{} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option)
         logging.info("Run func tests '%s'", full_command)
         p = Popen(full_command, shell=True, stdout=f, stderr=f)
         pipes.append(p)
@@ -4,6 +4,7 @@ set (CLICKHOUSE_CLIENT_SOURCES
     QueryFuzzer.cpp
     Suggest.cpp
     TestHint.cpp
+    TestTags.cpp
 )

 set (CLICKHOUSE_CLIENT_LINK
@@ -6,6 +6,7 @@
 #include "QueryFuzzer.h"
 #include "Suggest.h"
 #include "TestHint.h"
+#include "TestTags.h"

 #if USE_REPLXX
 #   include <common/ReplxxLineReader.h>

@@ -1078,12 +1079,17 @@ private:

         bool echo_query = echo_queries;

+        /// Test tags are started with "--" so they are interpreted as comments anyway.
+        /// But if the echo is enabled we have to remove the test tags from `all_queries_text`
+        /// because we don't want test tags to be echoed.
+        size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
+
         /// Several queries separated by ';'.
         /// INSERT data is ended by the end of line, not ';'.
         /// An exception is VALUES format where we also support semicolon in
         /// addition to end of line.

-        const char * this_query_begin = all_queries_text.data();
+        const char * this_query_begin = all_queries_text.data() + test_tags_length;
         const char * all_queries_end = all_queries_text.data() + all_queries_text.size();

         while (this_query_begin < all_queries_end)
programs/client/TestTags.cpp (new file, 50 lines)
@@ -0,0 +1,50 @@
#include "TestTags.h"
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
size_t getTestTagsLength(const String & multiline_query)
|
||||
{
|
||||
const String & text = multiline_query;
|
||||
size_t pos = 0;
|
||||
bool first_line = true;
|
||||
|
||||
while (true)
|
||||
{
|
||||
size_t line_start = pos;
|
||||
|
||||
/// Skip spaces.
|
||||
while ((pos != text.length()) && (text[pos] == ' ' || text[pos] == '\t'))
|
||||
++pos;
|
||||
|
||||
/// Skip comment "--".
|
||||
static constexpr const char comment[] = "--";
|
||||
if (text.compare(pos, strlen(comment), comment) != 0)
|
||||
return line_start;
|
||||
pos += strlen(comment);
|
||||
|
||||
/// Skip the prefix "Tags:" if it's the first line.
|
||||
if (first_line)
|
||||
{
|
||||
while ((pos != text.length()) && (text[pos] == ' ' || text[pos] == '\t'))
|
||||
++pos;
|
||||
|
||||
static constexpr const char tags_prefix[] = "Tags:";
|
||||
if (text.compare(pos, strlen(tags_prefix), tags_prefix) != 0)
|
||||
return 0;
|
||||
pos += strlen(tags_prefix);
|
||||
first_line = false;
|
||||
}
|
||||
|
||||
/// Skip end-of-line.
|
||||
size_t eol_pos = text.find_first_of("\r\n", pos);
|
||||
if (eol_pos == String::npos)
|
||||
return text.length();
|
||||
bool two_chars_eol = (eol_pos + 1 < text.length()) && ((text[eol_pos + 1] == '\r') || (text[eol_pos + 1] == '\n')) && (text[eol_pos + 1] != text[eol_pos]);
|
||||
size_t eol_length = two_chars_eol ? 2 : 1;
|
||||
pos = eol_pos + eol_length;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
programs/client/TestTags.h (new file, 18 lines)
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <Core/Types.h>
+
+namespace DB
+{
+
+/// Returns the length of a text looking like
+///     -- Tags: x, y, z
+///     -- Tag x: explanation of tag x
+///     -- Tag y: explanation of tag y
+///     -- Tag z: explanation of tag z
+///
+/// at the beginning of a multiline query.
+/// If there are no test tags in the multiline query the function returns 0.
+size_t getTestTagsLength(const String & multiline_query);
+
+}
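For orientation, here is a rough Python transcription of getTestTagsLength (not part of the commit; the sample query text is invented). It computes the offset where the leading tag comments end, which Client.cpp uses above to keep tags out of echoed queries:

    def get_test_tags_length(text: str) -> int:
        # Mirror of the C++ loop: walk line by line while lines are "--"
        # comments, requiring the first one to carry the "Tags:" prefix.
        pos = 0
        first_line = True
        while True:
            line_start = pos
            while pos < len(text) and text[pos] in ' \t':   # skip spaces
                pos += 1
            if not text.startswith('--', pos):              # tags end at the first non-comment line
                return line_start
            pos += 2
            if first_line:
                while pos < len(text) and text[pos] in ' \t':
                    pos += 1
                if not text.startswith('Tags:', pos):       # the first comment must be "Tags:"
                    return 0
                pos += len('Tags:')
                first_line = False
            eols = [i for i in (text.find('\r', pos), text.find('\n', pos)) if i != -1]
            if not eols:                                    # no end-of-line left
                return len(text)
            eol = min(eols)
            two_char = eol + 1 < len(text) and text[eol + 1] in '\r\n' and text[eol + 1] != text[eol]
            pos = eol + (2 if two_char else 1)

    query = "-- Tags: shard, no-parallel\n-- Tag shard: uses remote()\nSELECT 1\n"
    print(repr(query[get_test_tags_length(query):]))  # 'SELECT 1\n'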
@@ -6,12 +6,11 @@ import os
 import os.path
 import signal
 import re
-import json
 import copy
 import traceback

 from argparse import ArgumentParser
-from typing import Tuple, Union, Optional, TextIO
+from typing import Tuple, Union, Optional, TextIO, Dict, Set, List
 import shlex
 import subprocess
 from subprocess import Popen
@@ -83,76 +82,6 @@ def stop_tests():
     os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
     signal.signal(signal.SIGTERM, signal.SIG_DFL)

-def json_minify(string):
-    """
-    Removes all js-style comments from json string. Allows to have comments in skip_list.json.
-    The code was taken from https://github.com/getify/JSON.minify/tree/python under the MIT license.
-    """
-
-    tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r')
-    end_slashes_re = re.compile(r'(\\)*$')
-
-    in_string = False
-    in_multi = False
-    in_single = False
-
-    new_str = []
-    index = 0
-
-    for match in re.finditer(tokenizer, string):
-        if not (in_multi or in_single):
-            tmp = string[index:match.start()]
-            new_str.append(tmp)
-        else:
-            # Replace comments with white space so that the JSON parser reports
-            # the correct column numbers on parsing errors.
-            new_str.append(' ' * (match.start() - index))
-
-        index = match.end()
-        val = match.group()
-
-        if val == '"' and not (in_multi or in_single):
-            escaped = end_slashes_re.search(string, 0, match.start())
-
-            # start of string or unescaped quote character to end string
-            if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):  # noqa
-                in_string = not in_string
-            index -= 1  # include " character in next catch
-        elif not (in_string or in_multi or in_single):
-            if val == '/*':
-                in_multi = True
-            elif val == '//':
-                in_single = True
-        elif val == '*/' and in_multi and not (in_string or in_single):
-            in_multi = False
-            new_str.append(' ' * len(val))
-        elif val in '\r\n' and not (in_multi or in_string) and in_single:
-            in_single = False
-        elif not in_multi or in_single:  # noqa
-            new_str.append(val)
-
-        if val in '\r\n':
-            new_str.append(val)
-        elif in_multi or in_single:
-            new_str.append(' ' * len(val))
-
-    new_str.append(string[index:])
-    return ''.join(new_str)
-
-
 def remove_control_characters(s):
     """
     https://github.com/html5lib/html5lib-python/issues/96#issuecomment-43438438
     """
     def str_to_int(s, default, base=10):
         if int(s, base) < 0x10000:
             return chr(int(s, base))
         return default
     s = re.sub(r"&#(\d+);?", lambda c: str_to_int(c.group(1), c.group(0)), s)
     s = re.sub(r"&#[xX]([0-9a-fA-F]+);?", lambda c: str_to_int(c.group(1), c.group(0), base=16), s)
     s = re.sub(r"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", s)
     return s


 def get_db_engine(args, database_name):
     if args.replicated_database:
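The deleted helper existed only so that skip_list.json could carry JS-style comments. A minimal sketch of the round trip it enabled (the file contents and test names here are hypothetical):

    import json

    raw = '{"thread-sanitizer": ["00281"], /* not checked under TSan */ "parallel": ["01562"]}'
    # json.loads(raw) would raise an error: comments are not valid JSON.
    # json_minify(raw) blanked the comment before parsing, yielding something like:
    minified = '{"thread-sanitizer": ["00281"],                              "parallel": ["01562"]}'
    assert json.loads(minified)['parallel'] == ['01562']

With the tags living in the test files themselves, this machinery becomes unnecessary.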
@@ -429,48 +358,69 @@ def print_test_time(test_time) -> str:
     return ''


-def should_skip_test_by_name(name: str, test_ext: str) -> Tuple[bool, str]:
-    if args.skip and any(s in name for s in args.skip):
-        return True, "skip"
-
-    if not args.zookeeper and ('zookeeper' in name or 'replica' in name):
-        return True, "no zookeeper"
-
-    if not args.shard and \
-            ('shard' in name or 'distributed' in name or 'global' in name):
-        return True, "no shard"
-
-    # Tests for races and deadlocks usually are run in a loop for a significant
-    # amount of time
-    if args.no_long and \
-            ('long' in name or 'deadlock' in name or 'race' in name):
-        return True, "no long"
-
-    if not USE_JINJA and test_ext.endswith("j2"):
-        return True, "no jinja"
-
-    return False, ""
-
-
-def should_skip_disabled_test(name: str, suite_dir: str) -> Tuple[bool, str]:
-    disabled_file = os.path.join(suite_dir, name) + '.disabled'
-
-    if os.path.exists(disabled_file) and not args.disabled:
-        return True, open(disabled_file, 'r').read()
-
-    return False, ""
-
-
 # should skip test, should increment skipped_total, skip reason
-def should_skip_test(name: str, test_ext: str, suite_dir: str) -> Tuple[bool, bool, str]:
-    should_skip, skip_reason = should_skip_test_by_name(name, test_ext)
-
-    if should_skip:
-        return True, True, skip_reason
-
-    should_skip, skip_reason = should_skip_disabled_test(name, suite_dir)
-
-    return should_skip, False, skip_reason
+def should_skip_test(name: str, test_ext: str, suite_dir: str, all_tags: Dict[str, Set[str]]) -> Tuple[bool, bool, str]:
+    tags = all_tags.get(name + test_ext)
+
+    should_skip = False
+    increment_skip_count = False
+    skip_reason = ''
+
+    if tags and ('disabled' in tags) and not args.disabled:
+        should_skip = True
+        increment_skip_count = False
+        skip_reason = 'disabled'
+
+    elif os.path.exists(os.path.join(suite_dir, name) + '.disabled') and not args.disabled:
+        should_skip = True
+        increment_skip_count = False
+        skip_reason = 'disabled'
+
+    elif args.skip and any(s in name for s in args.skip):
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'skip'
+
+    elif not USE_JINJA and test_ext.endswith("j2"):
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'no jinja'
+
+    elif tags and (('zookeeper' in tags) or ('replica' in tags)) and not args.zookeeper:
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'no zookeeper'
+
+    elif tags and (('shard' in tags) or ('distributed' in tags) or ('global' in tags)) and not args.shard:
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'no shard'
+
+    elif tags and ('no-fasttest' in tags) and args.fast_tests_only:
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'running fast tests only'
+
+    elif tags and (('long' in tags) or ('deadlock' in tags) or ('race' in tags)) and args.no_long:
+        # Tests for races and deadlocks usually are run in a loop for a significant amount of time
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'not running long tests'
+
+    elif tags and ('no-replicated-database' in tags) and args.replicated_database:
+        should_skip = True
+        increment_skip_count = True
+        skip_reason = 'replicated-database'
+
+    elif tags:
+        for build_flag in args.build_flags:
+            if 'no-' + build_flag in tags:
+                should_skip = True
+                increment_skip_count = True
+                skip_reason = build_flag
+                break
+
+    return should_skip, increment_skip_count, skip_reason


 def send_test_name_failed(suite: str, case: str) -> bool:
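To make the branch order concrete, here is a condensed, self-contained restatement of two of the checks above (the helper name and values are illustrative, not from the commit); as in the elif chain, the first matching rule wins:

    from typing import Optional, Set

    def skip_reason_for(tags: Optional[Set[str]], no_long: bool, replicated_database: bool) -> str:
        # Same precedence as should_skip_test: 'long'-style tags are
        # checked before the 'no-replicated-database' tag.
        if tags and (('long' in tags) or ('deadlock' in tags) or ('race' in tags)) and no_long:
            return 'not running long tests'
        if tags and ('no-replicated-database' in tags) and replicated_database:
            return 'replicated-database'
        return ''

    print(skip_reason_for({'long', 'replica'}, no_long=True, replicated_database=False))
    # -> not running long tests
    print(skip_reason_for({'no-replicated-database'}, no_long=False, replicated_database=True))
    # -> replicated-database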
@@ -491,9 +441,9 @@ def send_test_name_failed(suite: str, case: str) -> bool:

 restarted_tests = []  # (test, stderr)

-# def run_tests_array(all_tests, suite, suite_dir, suite_tmp_dir, run_total):
+# def run_tests_array(all_tests, num_tests, suite, suite_dir, suite_tmp_dir, all_tags):
 def run_tests_array(all_tests_with_params):
-    all_tests, num_tests, suite, suite_dir, suite_tmp_dir = all_tests_with_params
+    all_tests, num_tests, suite, suite_dir, suite_tmp_dir, all_tags = all_tests_with_params
     global stop_time
     global exit_code
     global server_died

@@ -558,7 +508,7 @@ def run_tests_array(all_tests_with_params):
         status = "{0:72}".format(removesuffix(name, ".gen", ".sql") + ": ")

         skip_test, increment_skip_count, skip_reason = \
-            should_skip_test(name, ext, suite_dir)
+            should_skip_test(name, ext, suite_dir, all_tags)

         if skip_test:
             status += MSG_SKIPPED + f" - {skip_reason}\n"
@@ -782,16 +732,15 @@ def check_server_started(client, retry_count):


 class BuildFlags():
-    THREAD = 'thread-sanitizer'
-    ADDRESS = 'address-sanitizer'
-    UNDEFINED = 'ub-sanitizer'
-    MEMORY = 'memory-sanitizer'
-    DEBUG = 'debug-build'
-    UNBUNDLED = 'unbundled-build'
-    RELEASE = 'release-build'
-    DATABASE_ORDINARY = 'database-ordinary'
+    THREAD = 'tsan'
+    ADDRESS = 'asan'
+    UNDEFINED = 'ubsan'
+    MEMORY = 'msan'
+    DEBUG = 'debug'
+    UNBUNDLED = 'unbundled'
+    RELEASE = 'release'
+    ORDINARY_DATABASE = 'ordinary-database'
     POLYMORPHIC_PARTS = 'polymorphic-parts'
     DATABASE_REPLICATED = 'database-replicated'


 def collect_build_flags(client):

@@ -836,7 +785,7 @@ def collect_build_flags(client):

     if clickhouse_proc.returncode == 0:
         if b'Ordinary' in stdout:
-            result.append(BuildFlags.DATABASE_ORDINARY)
+            result.append(BuildFlags.ORDINARY_DATABASE)
     else:
         raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr))
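The shortened flag names line up with the 'no-<flag>' tag convention checked in the final branch of should_skip_test; a toy illustration (all values invented):

    build_flags = ['tsan', 'debug']   # as collected from the server binary
    tags = {'no-tsan', 'long'}        # as parsed from a test's first-line comment
    reason = next((flag for flag in build_flags if 'no-' + flag in tags), None)
    print(reason)  # tsan -> the test is skipped and the flag is reported as the reason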
@@ -902,7 +851,7 @@ def open_client_process(



-def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_tests, sequential_tests, parallel):
+def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, all_tags, parallel_tests, sequential_tests, parallel):
     if jobs > 1 and len(parallel_tests) > 0:
         print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests")
         run_n, run_total = parallel.split('/')

@@ -920,7 +869,7 @@ def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_test
         batch_size = max(1, len(parallel_tests) // jobs)
         parallel_tests_array = []
         for _ in range(jobs):
-            parallel_tests_array.append((None, batch_size, suite, suite_dir, suite_tmp_dir))
+            parallel_tests_array.append((None, batch_size, suite, suite_dir, suite_tmp_dir, all_tags))

         with closing(multiprocessing.Pool(processes=jobs)) as pool:
             pool.map_async(run_tests_array, parallel_tests_array)

@@ -935,11 +884,11 @@ def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_test

         pool.join()

-        run_tests_array((sequential_tests, len(sequential_tests), suite, suite_dir, suite_tmp_dir))
+        run_tests_array((sequential_tests, len(sequential_tests), suite, suite_dir, suite_tmp_dir, all_tags))
         return len(sequential_tests) + len(parallel_tests)
     else:
         num_tests = len(all_tests)
-        run_tests_array((all_tests, num_tests, suite, suite_dir, suite_tmp_dir))
+        run_tests_array((all_tests, num_tests, suite, suite_dir, suite_tmp_dir, all_tags))
         return num_tests
@@ -1053,23 +1002,10 @@ def main(args):
             "Server is not responding. Cannot execute 'SELECT 1' query. \
             If you are using split build, you have to specify -c option.")

-    build_flags = collect_build_flags(args.client)
-
-    if args.replicated_database:
-        build_flags.append(BuildFlags.DATABASE_REPLICATED)
-
-    if args.use_skip_list:
-        tests_to_skip_from_list = collect_tests_to_skip(args.skip_list_path, build_flags)
-    else:
-        tests_to_skip_from_list = set()
-
-    if args.skip:
-        args.skip = set(args.skip) | tests_to_skip_from_list
-    else:
-        args.skip = tests_to_skip_from_list
-
-    if args.use_skip_list and not args.sequential:
-        args.sequential = collect_sequential_list(args.skip_list_path)
+    args.build_flags = collect_build_flags(args.client)
+
+    if args.replicated_database:
+        args.build_flags.append(BuildFlags.DATABASE_REPLICATED)
+
+    if args.skip:
+        args.skip = set(args.skip)

     base_dir = os.path.abspath(args.queries)
     tmp_dir = os.path.abspath(args.tmp)
@@ -1148,19 +1084,21 @@ def main(args):
         all_tests = get_tests_list(
             suite_dir, args.test, args.test_runs, tests_in_suite_key_func)

-        jobs = args.jobs
+        all_tags = read_test_tags(suite_dir, all_tests)

         parallel_tests = []
         sequential_tests = []
-        for test in all_tests:
-            if any(s in test for s in args.sequential):
-                sequential_tests.append(test)
-            else:
-                parallel_tests.append(test)
+        if args.sequential:
+            for test in all_tests:
+                if any(s in test for s in args.sequential):
+                    sequential_tests.append(test)
+        else:
+            sequential_tests = collect_sequential_list(all_tags)
+
+        sequential_tests_set = set(sequential_tests)
+        parallel_tests = [test for test in all_tests if test not in sequential_tests_set]

         total_tests_run += do_run_tests(
-            jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_tests, sequential_tests, args.parallel)
+            args.jobs, suite, suite_dir, suite_tmp_dir, all_tests, all_tags, parallel_tests, sequential_tests, args.parallel)

         if server_died.is_set():
             exit_code.value = 1
@@ -1232,40 +1170,58 @@ def get_additional_client_options_url(args):
     return ''


-def collect_tests_to_skip(skip_list_path, build_flags):
-    result = set([])
-
-    if not os.path.exists(skip_list_path):
-        return result
-
-    with open(skip_list_path, 'r') as skip_list_file:
-        content = skip_list_file.read()
-
-        # allows to have comments in skip_list.json
-        skip_dict = json.loads(json_minify(content))
-
-        for build_flag in build_flags:
-            result |= set(skip_dict[build_flag])
-
-    count = len(result)
-
-    if count > 0:
-        print(f"Found file with skip-list {skip_list_path}, {count} test will be skipped")
-
-    return result
+def read_test_tags(suite_dir: str, all_tests: List[str]) -> Dict[str, Set[str]]:
+    def get_comment_sign(filename):
+        if filename.endswith('.sql') or filename.endswith('.sql.j2'):
+            return '--'
+        elif filename.endswith('.sh') or filename.endswith('.py') or filename.endswith('.expect'):
+            return '#'
+        else:
+            raise Exception(f'Unknown file_extension: {filename}')
+
+    def parse_tags_from_line(line, comment_sign):
+        if not line.startswith(comment_sign):
+            return None
+        tags_str = line[len(comment_sign):].lstrip()
+        tags_prefix = "Tags:"
+        if not tags_str.startswith(tags_prefix):
+            return None
+        tags_str = tags_str[len(tags_prefix):]
+        tags = tags_str.split(',')
+        tags = {tag.strip() for tag in tags}
+        return tags
+
+    def is_shebang(line):
+        return line.startswith('#!')
+
+    def load_tags_from_file(filepath):
+        with open(filepath, 'r') as file:
+            try:
+                line = file.readline()
+                if is_shebang(line):
+                    line = file.readline()
+            except UnicodeDecodeError:
+                return []
+        return parse_tags_from_line(line, get_comment_sign(filepath))
+
+    all_tags = {}
+    start_time = datetime.now()
+    for test_name in all_tests:
+        tags = load_tags_from_file(os.path.join(suite_dir, test_name))
+        if tags:
+            all_tags[test_name] = tags
+    elapsed = (datetime.now() - start_time).total_seconds()
+    if elapsed > 1:
+        print(f"Tags for suite {suite_dir} read in {elapsed:.2f} seconds")
+    return all_tags


-def collect_sequential_list(skip_list_path):
-    if not os.path.exists(skip_list_path):
-        return set([])
-
-    with open(skip_list_path, 'r') as skip_list_file:
-        content = skip_list_file.read()
-        # allows to have comments in skip_list.json
-        skip_dict = json.loads(json_minify(content))
-        if 'parallel' in skip_dict:
-            return skip_dict['parallel']
-        return set([])
+def collect_sequential_list(all_tags: Dict[str, Set[str]]) -> List[str]:
+    res = []
+    for test_name, tags in all_tags.items():
+        if ('no-parallel' in tags) or ('sequential' in tags):
+            res.append(test_name)
+    return res


 if __name__ == '__main__':
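A standalone check of the new parser on representative first lines (the helper is restated here so the snippet runs on its own; the tag values match the test files below):

    def parse_tags_from_line(line, comment_sign):
        if not line.startswith(comment_sign):
            return None
        tags_str = line[len(comment_sign):].lstrip()
        if not tags_str.startswith('Tags:'):
            return None
        return {tag.strip() for tag in tags_str[len('Tags:'):].split(',')}

    print(parse_tags_from_line('-- Tags: long, zookeeper, no-replicated-database', '--'))
    # {'long', 'zookeeper', 'no-replicated-database'} (set ordering may vary)
    print(parse_tags_from_line('# Tags: race, shard', '#'))
    # {'race', 'shard'}
    print(parse_tags_from_line('SELECT 1', '--'))
    # None -> this file declares no tags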
@@ -1308,10 +1264,9 @@ if __name__ == '__main__':
     parser.add_argument('--test-runs', default=1, nargs='?', type=int, help='Run each test many times (useful for e.g. flaky check)')
     parser.add_argument('-U', '--unified', default=3, type=int, help='output NUM lines of unified context')
     parser.add_argument('-r', '--server-check-retries', default=30, type=int, help='Num of tries to execute SELECT 1 before tests started')
-    parser.add_argument('--skip-list-path', help="Path to skip-list file")
-    parser.add_argument('--use-skip-list', action='store_true', default=False, help="Use skip list to skip tests if found")
     parser.add_argument('--db-engine', help='Database engine name')
     parser.add_argument('--replicated-database', action='store_true', default=False, help='Run tests with Replicated database engine')
+    parser.add_argument('--fast-tests-only', action='store_true', default=False, help='Run only fast tests (the tests without the "no-fasttest" tag)')
     parser.add_argument('--no-stateless', action='store_true', help='Disable all stateless tests')
     parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests')
     parser.add_argument('--skip', nargs='+', help="Skip these tests")

@@ -1358,12 +1313,6 @@ if __name__ == '__main__':

     print("Using queries from '" + args.queries + "' directory")

-    if args.skip_list_path is None:
-        args.skip_list_path = os.path.join(args.queries, 'skip_list.json')
-
-    if args.sequential is None:
-        args.sequential = set([])
-
     if args.tmp is None:
         args.tmp = args.queries
     if args.client is None:
@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT (dummy AS x) - 1 FROM remote('127.0.0.{2,3}', system, one)

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT count() FROM remote('127.0.0.{2,3}', system, one) WHERE arrayExists((x) -> x = 1, [1, 2, 3])

@@ -1 +1,3 @@
+-- Tags: distributed
+
 SELECT quantilesTiming(0.1, 0.5, 0.9)(dummy) FROM remote('127.0.0.{2,3}', system, one) GROUP BY 1 WITH TOTALS

@@ -1 +1,3 @@
+-- Tags: distributed
+
 SELECT NOT dummy FROM remote('127.0.0.{2,3}', system, one) WHERE NOT dummy

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 DROP TABLE IF EXISTS big_array;
 CREATE TABLE big_array (x Array(UInt8)) ENGINE=TinyLog;
 SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: zookeeper

 CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT number FROM remote('127.0.0.{2,3}', system, numbers) WHERE number GLOBAL IN (SELECT number FROM remote('127.0.0.{2,3}', system, numbers) WHERE number % 2 = 1 LIMIT 10) LIMIT 10;

@@ -1,3 +1,6 @@
+-- Tags: long, replica, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+
 DROP TABLE IF EXISTS replicated_alter1;
 DROP TABLE IF EXISTS replicated_alter2;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT toTypeName(1.0) FROM remote('127.0.0.{2,3}', system, one)

@@ -1,2 +1,4 @@
+-- Tags: replica
+
 SELECT arrayFilter(x -> materialize(0), materialize([0])) AS p, arrayAll(y -> arrayExists(x -> y != x, p), p) AS test;
 SELECT arrayFilter(x -> materialize(0), materialize([''])) AS p, arrayAll(y -> arrayExists(x -> y != x, p), p) AS test;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT -(-1) FROM remote('127.0.0.{2,3}', system, one)

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 DROP DATABASE IF EXISTS test_show_tables;

 CREATE DATABASE test_show_tables;

@@ -1,3 +1,7 @@
+-- Tags: long, zookeeper, no-replicated-database, no-parallel
+-- Tag no-replicated-database: Old syntax is not allowed
+-- Tag no-parallel: leftovers
+
 SET optimize_on_insert = 0;

 DROP TABLE IF EXISTS merge_tree;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS report1;
 DROP TABLE IF EXISTS report2;

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 CREATE DATABASE IF NOT EXISTS test_00101_0;

 USE test_00101_0;

@@ -1,3 +1,5 @@
+-- Tags: shard, no-fasttest
+
 SELECT 'Русский (default)';
 SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x ORDER BY x;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET max_rows_to_group_by = 100000;
 SET group_by_overflow_mode = 'any';

@@ -1,3 +1,5 @@
+-- Tags: shard, no-parallel, no-fasttest
+
 SET max_rows_to_group_by = 100000;
 SET max_block_size = 100001;
 SET group_by_overflow_mode = 'any';

@@ -1,3 +1,5 @@
+-- Tags: no-parallel, no-fasttest
+
 SET max_memory_usage = 300000000;
 SET max_bytes_before_external_sort = 20000000;
 SELECT number FROM (SELECT number FROM system.numbers LIMIT 10000000) ORDER BY number * 1234567890123456789 LIMIT 9999990, 10;

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 SET max_memory_usage = 300000000;
 SET max_bytes_before_external_sort = 20000000;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET totals_mode = 'after_having_auto';
 SET max_rows_to_group_by = 100000;
 SET group_by_overflow_mode = 'any';

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SELECT intDiv(number, 100) AS k, length(groupArray(number)) FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10;

 SELECT '';

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: shard

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 DROP TABLE IF EXISTS set;
 DROP TABLE IF EXISTS set2;

@@ -1,3 +1,6 @@
+-- Tags: zookeeper, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+
 DROP TABLE IF EXISTS alter_00121;
 CREATE TABLE alter_00121 (d Date, x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/alter_00121/t1', 'r1', d, (d), 8192);

@@ -1,2 +1,4 @@
+-- Tags: distributed
+
 SET max_distributed_connections = 1;
 SELECT count() + 1 FROM remote('127.0.0.{2,3}', system, one);

@@ -1,3 +1,5 @@
+-- Tags: replica, distributed
+
 SET max_parallel_replicas = 2;

 DROP TABLE IF EXISTS report;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: long, shard, no-parallel

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1 +1,3 @@
+-- Tags: distributed
+
 SELECT DISTINCT number FROM remote('127.0.0.{2,3}', system.numbers) LIMIT 10

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: long

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 CREATE DATABASE IF NOT EXISTS test2_00158;
 DROP TABLE IF EXISTS test2_00158.mt_buffer_00158;
 DROP TABLE IF EXISTS test2_00158.mt_00158;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT toFloat64(dummy + 2) AS n, j1, j2 FROM remote('127.0.0.{2,3}', system.one) jr1 GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 10) jr2 USING n LIMIT 10;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET any_join_distinct_right_table_keys = 1;
 SET joined_subquery_requires_alias = 0;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) FROM remote('127.0.0.{2,3}', system.one) ORDER BY rand();

@@ -1,3 +1,5 @@
+-- Tags: replica
+
 SELECT
     number,
     range(number) AS arr,

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 DROP TABLE IF EXISTS t_00180;
 DROP TABLE IF EXISTS mv_00180;
 DROP TABLE IF EXISTS `.inner.mv_00180`;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET send_logs_level = 'fatal';
 SELECT count() FROM remote('{127,1}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1;
 SELECT count() FROM remote('{1,127}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1;

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 SELECT 'distributed_group_by_no_merge=1';
 SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) SETTINGS distributed_group_by_no_merge=1;
 SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3,4,5}', system.one) SETTINGS distributed_group_by_no_merge=1;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: long

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
@@ -1,3 +1,4 @@
+-- Tags: long

 /* timestamp 1419800400 == 2014-12-29 00:00:00 (Europe/Moscow) */
 /* timestamp 1412106600 == 2014-09-30 23:50:00 (Europe/Moscow) */

@@ -1,3 +1,5 @@
+-- Tags: replica
+
 DROP TABLE IF EXISTS parallel_replicas;
 DROP TABLE IF EXISTS parallel_replicas_backup;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SELECT * FROM (SELECT * WHERE dummy GLOBAL IN (SELECT 0));
 SELECT * FROM (SELECT * WHERE dummy GLOBAL IN (SELECT toUInt8(number) FROM system.numbers LIMIT 10));
 SELECT * FROM (SELECT * FROM (SELECT * FROM system.numbers LIMIT 20) WHERE number GLOBAL IN (SELECT number FROM system.numbers LIMIT 10));

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 DROP TABLE IF EXISTS numbers_memory;
 CREATE TABLE numbers_memory AS system.numbers ENGINE = Memory;
 INSERT INTO numbers_memory SELECT number FROM system.numbers LIMIT 100;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SELECT toUInt64(1) IN (1234567890, 2345678901, 3456789012, 4567890123, 5678901234, 6789012345, 7890123456, 8901234567, 9012345678, 123456789) AS x,
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,

@@ -1,3 +1,5 @@
+-- Tags: long, shard
+
 -- uniqHLL12

 SELECT 'uniqHLL12';

@@ -1 +1,3 @@
+-- Tags: global
+
 SELECT 1 GLOBAL IN (SELECT 1), 2 GLOBAL IN (SELECT 2) FROM remote('localhost', system.one);

@@ -1,3 +1,6 @@
+-- Tags: long, zookeeper, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+
 DROP TABLE IF EXISTS primary_key;
 CREATE TABLE primary_key (d Date DEFAULT today(), x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00215/primary_key', 'r1', d, -x, 1);

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET joined_subquery_requires_alias = 0;

 SELECT k, a FROM (SELECT 42 AS k FROM remote('127.0.0.2', system.one)) GLOBAL ALL FULL OUTER JOIN (SELECT 42 AS k, 1 AS a, a) USING k;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT x FROM (SELECT count() AS x FROM remote('127.0.0.2', system.one) WITH TOTALS) LIMIT 1;

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 SET max_block_size = 1000;

 DROP TABLE IF EXISTS numbers_10_00223;

@@ -1,3 +1,5 @@
+-- Tags: distributed
+
 DROP TABLE IF EXISTS numbers_100k_log;
 CREATE TABLE numbers_100k_log ENGINE = Log AS SELECT * FROM system.numbers LIMIT 100000;

@@ -1,3 +1,6 @@
+-- Tags: long, zookeeper, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+
 DROP TABLE IF EXISTS deduplication;
 CREATE TABLE deduplication (d Date DEFAULT '2015-01-01', x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00226/deduplication', 'r1', d, x, 1);

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 select quantilesDeterministic(0.5, 0.9)(number, number) from (select number from system.numbers limit 101);
 -- test merge does not cause overflow
 select ignore(quantilesDeterministic(0.5, 0.9)(number, number)) from (select number from remote('127.0.0.{2,3}', system, numbers) limit 1000000);

@@ -1,3 +1,6 @@
+-- Tags: long, replica, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+
 SET replication_alter_partitions_sync = 2;

 DROP TABLE IF EXISTS attach_r1;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS storage;
 CREATE TABLE storage(UserID UInt64) ENGINE=Memory;
 INSERT INTO storage(UserID) values (6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(402895971392036118)(402895971392036118)(402895971392036118);

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 select 40 as z from (select * from system.numbers limit 3) group by z;
 select 41 as z from remote('127.0.0.{2,3}', system.one) group by z;
 select count(), 42 AS z from remote('127.0.0.{2,3}', system.one) group by z;

@@ -1,3 +1,5 @@
+-- Tags: no-fasttest
+
 SELECT cityHash64(1, 2, '') AS x1, cityHash64((1, 2), '') AS x2, cityHash64(1, (2, '')) AS x3, cityHash64((1, 2, '')) AS x4;
 SELECT cityHash64(materialize(1), 2, '') AS x1, cityHash64((materialize(1), 2), '') AS x2, cityHash64(materialize(1), (2, '')) AS x3, cityHash64((materialize(1), 2, '')) AS x4;
 SELECT cityHash64(1, materialize(2), '') AS x1, cityHash64((1, materialize(2)), '') AS x2, cityHash64(1, (materialize(2), '')) AS x3, cityHash64((1, materialize(2), '')) AS x4;

@@ -1,3 +1,5 @@
+-- Tags: race
+
 SELECT 'a}a' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat;
 SELECT 'a}a' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat;
 SELECT 'a}a' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat;

@@ -1 +1,3 @@
+-- Tags: shard
+
 SELECT 1 GLOBAL IN (SELECT 1) AS s, s FROM remote('127.0.0.{2,3}', system.one);

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SELECT length(groupArray(number)), count() FROM (SELECT number FROM system.numbers_mt LIMIT 1000000);
 SELECT groupArray(dummy), count() FROM remote('127.0.0.{2,3}', system.one);

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SELECT quantileExactWeighted(0.5)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001);
 SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001);
 SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, number) FROM (SELECT number FROM system.numbers LIMIT 1001);

@@ -1 +1,3 @@
+-- Tags: long
+
 select reinterpretAsFloat64(unhex('875635ffffffbfbe'))

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS numbers_10_00290;
 SET max_block_size = 1000;
 CREATE TABLE numbers_10_00290 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET max_subquery_depth = 3;

 SELECT 1 FROM remote('127.0.0.{1,2}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one))));

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 set max_threads = 1;
 drop table if exists enums;
@@ -1,3 +1,5 @@
+-- Tags: shard
+
 SET output_format_write_statistics = 0;
 SELECT arrayJoin(range(100)) AS x FROM remote('127.0.0.2', system.one) WHERE x GLOBAL IN (SELECT toUInt8(arrayJoin(range(100)) + 50)) GROUP BY x ORDER BY x LIMIT 10 FORMAT JSONCompact;
 SELECT arrayJoin(range(100)) AS x FROM remote('127.0.0.{2,3}', system.one) WHERE x GLOBAL IN (SELECT toUInt8(arrayJoin(range(100)) + 50)) GROUP BY x ORDER BY x LIMIT 10 FORMAT JSONCompact;

@@ -1,3 +1,5 @@
+-- Tags: disabled, zookeeper, no-parallel
+
 DROP TABLE IF EXISTS r1;
 DROP TABLE IF EXISTS r2;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-fasttest

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-parallel

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,3 +1,5 @@
+-- Tags: no-parallel
+
 DROP TABLE IF EXISTS array_pk;
 CREATE TABLE array_pk (key Array(UInt8), s String, n UInt64, d Date MATERIALIZED '2000-01-01') ENGINE = MergeTree(d, (key, s, n), 1);

@@ -1,2 +1,4 @@
+-- Tags: no-fasttest
+
 SELECT cityHash64(*) FROM (SELECT 1 AS x, CAST(x AS Enum8('Hello' = 0, 'World' = 1)) AS y);
 SELECT cityHash64(*) FROM (SELECT 1 AS x, x AS y);

@@ -1,3 +1,5 @@
+-- Tags: disabled
+
 DROP TABLE IF EXISTS replacing;
 CREATE TABLE replacing (d Date, k UInt64, s String, v UInt16) ENGINE = ReplacingMergeTree(d, k, 8192, v);

@@ -1,3 +1,4 @@
+-- Tags: long

 SELECT 'Trivial case';

@@ -1,3 +1,5 @@
+-- Tags: long
+
 /* Trivial case */

 SELECT CASE WHEN 1 THEN 2 WHEN 3 THEN 4 ELSE 5 END;

@@ -1,2 +1,4 @@
+-- Tags: no-parallel, no-fasttest
+
 SELECT quantileTiming(number) FROM (SELECT * FROM system.numbers LIMIT 10000);
 SELECT floor(log2(1 + number) / log2(1.5)) AS k, count() AS c, quantileTiming(number % 10000) AS q FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: race, shard

 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh

@@ -1,2 +1,4 @@
+-- Tags: shard
+
 SELECT anyHeavy(x) FROM (SELECT intHash64(number) % 100 < 60 ? 999 : number AS x FROM system.numbers LIMIT 100000);
 SELECT anyHeavy(1) FROM remote('127.0.0.{2,3}', system.one);

@@ -1,3 +1,5 @@
+-- Tags: replica
+
 DROP TABLE IF EXISTS bad_arrays;
 CREATE TABLE bad_arrays (a Array(String), b Array(UInt8)) ENGINE = Memory;

@@ -1,4 +1,5 @@
 #!/usr/bin/env bash
+# Tags: no-fasttest

 set -e

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS group_uniq_str;
 CREATE TABLE group_uniq_str ENGINE = Memory AS SELECT number % 10 as id, toString(intDiv((number%10000), 10)) as v FROM system.numbers LIMIT 10000000;

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS group_uniq_arr_int;
 CREATE TABLE group_uniq_arr_int ENGINE = Memory AS
     SELECT g as id, if(c == 0, [v], if(c == 1, emptyArrayInt64(), [v, v])) as v FROM

@@ -1,3 +1,5 @@
+-- Tags: shard
+
 DROP TABLE IF EXISTS group_uniq_arr_str;
 CREATE TABLE group_uniq_arr_str ENGINE = Memory AS
     SELECT hex(intHash32(g)) as id, if(c == 0, [hex(v)], if(c == 1, emptyArrayString(), [hex(v), hex(v)])) as v FROM
Some files were not shown because too many files have changed in this diff.