Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-09-20 08:40:50 +00:00

Merge remote-tracking branch 'origin' into integration-2

This commit is contained in: commit 2b02689213
contrib/libunwind (vendored)
@@ -1 +1 @@
-Subproject commit 8fe25d7dc70f2a4ea38c3e5a33fa9d4199b67a5a
+Subproject commit 1e4a2e5ce77be1af12e918a3c15dccf2bbac587d
@@ -44,7 +44,7 @@ parser.add_argument('--port', nargs='*', default=[9000], help="Space-separated l
 parser.add_argument('--runs', type=int, default=1, help='Number of query runs per server.')
 parser.add_argument('--max-queries', type=int, default=None, help='Test no more than this number of queries, chosen at random.')
 parser.add_argument('--queries-to-run', nargs='*', type=int, default=None, help='Space-separated list of indexes of queries to test.')
-parser.add_argument('--max-query-seconds', type=int, default=10, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
+parser.add_argument('--max-query-seconds', type=int, default=15, help='For how many seconds at most a query is allowed to run. The script finishes with error if this time is exceeded.')
 parser.add_argument('--profile-seconds', type=int, default=0, help='For how many seconds to profile a query for which the performance has changed.')
 parser.add_argument('--long', action='store_true', help='Do not skip the tests tagged as long.')
 parser.add_argument('--print-queries', action='store_true', help='Print test queries and exit.')
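For context, the limit is enforced per query through the `max_execution_time` setting passed on execute (visible in the next hunk). A minimal, hypothetical sketch with the `clickhouse_driver` package; the server address and query are placeholders:

```python
import clickhouse_driver

client = clickhouse_driver.Client('localhost', port=9000)
# The server aborts the query once it exceeds the limit; perf.py treats the
# resulting exception as a test error.
client.execute('SELECT count() FROM numbers(1000000000)',
               settings={'max_execution_time': 15})
```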
@@ -273,8 +273,14 @@ for query_index in queries_to_run:
     prewarm_id = f'{query_prefix}.prewarm0'

     try:
-        # Will also detect too long queries during warmup stage
-        res = c.execute(q, query_id = prewarm_id, settings = {'max_execution_time': args.max_query_seconds})
+        # During the warmup runs, we will also:
+        # * detect queries that are exceedingly long, to fail fast,
+        # * collect profiler traces, which might be helpful for analyzing
+        #   test coverage. We disable profiler for normal runs because
+        #   it makes the results unstable.
+        res = c.execute(q, query_id = prewarm_id,
+                settings = {'max_execution_time': args.max_query_seconds,
+                            'query_profiler_real_time_period_ns': 10000000})
     except clickhouse_driver.errors.Error as e:
         # Add query id to the exception to make debugging easier.
         e.args = (prewarm_id, *e.args)
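The profiler period above is expressed in nanoseconds; a quick sanity check of the sampling rate it implies (plain arithmetic, no ClickHouse dependency):

```python
period_ns = 10_000_000                         # query_profiler_real_time_period_ns above
samples_per_second = 1_000_000_000 / period_ns
print(samples_per_second)                      # 100.0 -- about 100 stack samples per second
```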
@@ -359,10 +365,11 @@ for query_index in queries_to_run:
         # For very short queries we have a special mode where we run them for at
         # least some time. The recommended lower bound of run time for "normal"
         # queries is about 0.1 s, and we run them about 10 times, giving the
-        # time per query per server of about one second. Use this value as a
-        # reference for "short" queries.
+        # time per query per server of about one second. Run "short" queries
+        # for longer time, because they have a high percentage of overhead and
+        # might give less stable results.
         if is_short[query_index]:
-            if server_seconds >= 2 * len(this_query_connections):
+            if server_seconds >= 8 * len(this_query_connections):
                 break
             # Also limit the number of runs, so that we don't go crazy processing
             # the results -- 'eqmed.sql' is really suboptimal.
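A standalone restatement of the new stopping rule for short queries (the names mirror perf.py; this is a sketch, not the script's actual code):

```python
def enough_short_query_runtime(server_seconds, this_query_connections):
    # Short queries now accumulate about 8 seconds per connection before the
    # run loop stops (previously 2), trading wall time for stabler results.
    return server_seconds >= 8 * len(this_query_connections)
```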
@@ -446,6 +446,9 @@ if args.report == 'main':
                 attrs[3] = f'style="background: {color_bad}"'
             else:
                 attrs[3] = ''
+                # Just don't add the slightly unstable queries we don't consider
+                # errors. It's not clear what the user should do with them.
+                continue

             text += tableRow(r, attrs, anchor)

@@ -553,12 +556,11 @@ if args.report == 'main':
         error_tests += unstable_partial_queries
         status = 'failure'

     if unstable_queries:
         message_array.append(str(unstable_queries) + ' unstable')

-    # Disabled before fix.
-    # if very_unstable_queries:
-    #     status = 'failure'
+    # Don't show mildly unstable queries, only the very unstable ones we
+    # treat as errors.
     if very_unstable_queries:
+        status = 'failure'
         message_array.append(str(very_unstable_queries) + ' unstable')

     error_tests += slow_average_tests
     if error_tests:
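The effect of this hunk on the final verdict, restated as a self-contained sketch (a hypothetical function; the real logic lives inline in the report script):

```python
def summarize(unstable_queries, very_unstable_queries):
    status, message_array = 'success', []
    if unstable_queries:
        message_array.append(f'{unstable_queries} unstable')
    # Only the very unstable queries are treated as errors now.
    if very_unstable_queries:
        status = 'failure'
        message_array.append(f'{very_unstable_queries} unstable')
    return status, message_array
```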
@@ -57,7 +57,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

 ## YAML examples {#example}

-Here you can see default config written in YAML: [config-example.yaml](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config-example.yaml).
+Here you can see default config written in YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).

 There are some differences between YAML and XML formats in terms of ClickHouse configurations. Here are some tips for writing a configuration in YAML format.

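As a rough illustration of the YAML-versus-XML difference mentioned here (assumes the third-party PyYAML package; not part of ClickHouse):

```python
import yaml  # PyYAML

doc = """
map_key:
    key1: val1
seq_key:
    - val1
    - val2
"""
# A Map parses to a dict, a Sequence to a list:
print(yaml.safe_load(doc))
# {'map_key': {'key1': 'val1'}, 'seq_key': ['val1', 'val2']}
```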
@@ -6,9 +6,9 @@ toc_title: "Конфигурационные файлы"

 # Configuration files {#configuration_files}

-The main server configuration file is `config.xml`. It resides in the `/etc/clickhouse-server/` directory.
+The main server configuration file is `config.xml` or `config.yaml`. It resides in the `/etc/clickhouse-server/` directory.

-Individual settings can be overridden in `*.xml` and `*.conf` files in the `config.d` directory next to the config.
+Individual settings can be overridden in `*.xml` and `*.conf` files, as well as `.yaml` files (for files in YAML format), in the `config.d` directory next to the config.

 Elements of these configuration files may have `replace` or `remove` attributes.
@@ -25,7 +25,7 @@ toc_title: "Конфигурационные файлы"
 In the `users_config` element of `config.xml` you can specify a relative path to the configuration file with the settings of users, profiles, and quotas. By default `users_config` is `users.xml`. If `users_config` is omitted, the settings of users, profiles, and quotas can be specified directly in `config.xml`.

 User settings can be split into several separate files similar to `config.xml` and `config.d`.
-The directory name is derived from the file name in `users_config`, with `.d` substituted for `.xml`.
+The directory name is derived from the file name in `users_config`, with `.d` substituted for `.xml`/`.yaml`.
 The `users.d` directory is used by default, just as `users.xml` is used for `users_config`.
 For example, you can have a separate configuration file for each user, like this:
@@ -52,3 +52,66 @@ $ cat /etc/clickhouse-server/users.d/alice.xml

 The server tracks changes in configuration files, as well as in the files and ZooKeeper nodes used for substitutions and overrides, and reloads the settings of users and clusters on the fly. This means you can modify clusters, users, and their settings without restarting the server.
+
+## YAML configuration examples {#example}
+
+Here you can look at a real configuration written in YAML: [config.yaml.example](https://github.com/ClickHouse/ClickHouse/blob/master/programs/server/config.yaml.example).
+
+There are differences between the XML and YAML standards, so this section lists some tips for writing a configuration in YAML.
+
+To write an ordinary key-value pair, use a Scalar:
+``` yaml
+key: value
+```
+
+To create a tag that contains subtags, use a Map:
+``` yaml
+map_key:
+    key1: val1
+    key2: val2
+    key3: val3
+```
+
+To create a list of values or subtags under a given key, use a Sequence:
+``` yaml
+seq_key:
+    - val1
+    - val2
+    - key1: val3
+    - map:
+        key2: val4
+        key3: val5
+```
+
+If you need to declare a tag analogous to an XML attribute, specify a scalar whose key has the @ prefix and is enclosed in quotes:
+
+``` yaml
+map:
+    "@attr1": value1
+    "@attr2": value2
+    key: 123
+```
+
+After conversion, such a Map yields:
+
+``` xml
+<map attr1="value1" attr2="value2">
+    <key>123</key>
+</map>
+```
+
+Besides Map, attributes can also be set for a Sequence:
+
+``` yaml
+seq:
+    - "@attr1": value1
+    - "@attr2": value2
+    - 123
+    - abc
+```
+
+This is equivalent to the following XML:
+
+``` xml
+<seq attr1="value1" attr2="value2">123</seq>
+<seq attr1="value1" attr2="value2">abc</seq>
+```
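The `@attr` convention above can be demonstrated with a small, hypothetical Python conversion (assumes PyYAML; ClickHouse's actual converter is the C++ YAMLParser added to the build further below):

```python
import xml.etree.ElementTree as ET
import yaml

node = yaml.safe_load('map:\n    "@attr1": value1\n    key: 123')['map']
elem = ET.Element('map')
for k, v in node.items():
    if k.startswith('@'):
        elem.set(k[1:], str(v))               # '@'-prefixed keys become XML attributes
    else:
        ET.SubElement(elem, k).text = str(v)  # other keys become child elements
print(ET.tostring(elem).decode())
# <map attr1="value1"><key>123</key></map>
```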
@@ -1,4 +1,8 @@
-# NOTE: User and query level settings are set up in "users.xml" file.
 # This is an example of a configuration file "config.xml" rewritten in YAML
 # You can read this documentation for detailed information about YAML configuration:
 # https://clickhouse.tech/docs/en/operations/configuration-files/
+
+# NOTE: User and query level settings are set up in "users.yaml" file.
+# If you have accidentally specified user-level settings here, server won't start.
+# You can either move the settings to the right place inside "users.xml" file
+# or add skip_check_for_incorrect_settings: 1 here.
programs/server/users.yaml.example (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
# Profiles of settings.
|
||||
profiles:
|
||||
# Default settings.
|
||||
default:
|
||||
# Maximum memory usage for processing single query, in bytes.
|
||||
max_memory_usage: 10000000000
|
||||
|
||||
# How to choose between replicas during distributed query processing.
|
||||
# random - choose random replica from set of replicas with minimum number of errors
|
||||
# nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
# with minimum number of different symbols between replica's hostname and local hostname (Hamming distance).
|
||||
# in_order - first live replica is chosen in specified order.
|
||||
# first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
|
||||
load_balancing: random
|
||||
|
||||
# Profile that allows only read queries.
|
||||
readonly:
|
||||
readonly: 1
|
||||
|
||||
# Users and ACL.
|
||||
users:
|
||||
# If user name was not specified, 'default' user is used.
|
||||
default:
|
||||
# Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
#
|
||||
# If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
# Example: password: qwerty
|
||||
# Password could be empty.
|
||||
#
|
||||
# If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
# Example: password_sha256_hex: 65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5
|
||||
# Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
#
|
||||
# If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
# Example: password_double_sha1_hex: e395796d6546b1b65db9d665cd43f0e858dd4303
|
||||
#
|
||||
# If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
|
||||
# place its name in 'server' element inside 'ldap' element.
|
||||
# Example: ldap:
|
||||
# server: my_ldap_server
|
||||
#
|
||||
# If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
|
||||
# place 'kerberos' element instead of 'password' (and similar) elements.
|
||||
# The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
|
||||
# You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
|
||||
# whose initiator's realm matches it.
|
||||
# Example: kerberos: ''
|
||||
# Example: kerberos:
|
||||
# realm: EXAMPLE.COM
|
||||
#
|
||||
# How to generate decent password:
|
||||
# Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
# In first line will be password and in second - corresponding SHA256.
|
||||
#
|
||||
# How to generate double SHA1:
|
||||
# Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||
# In first line will be password and in second - corresponding double SHA1.
|
||||
|
||||
password: ''
|
||||
|
||||
# List of networks with open access.
|
||||
#
|
||||
# To open access from everywhere, specify:
|
||||
# - ip: '::/0'
|
||||
#
|
||||
# To open access only from localhost, specify:
|
||||
# - ip: '::1'
|
||||
# - ip: 127.0.0.1
|
||||
#
|
||||
# Each element of list has one of the following forms:
|
||||
# ip: IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
# 2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
# host: Hostname. Example: server01.yandex.ru.
|
||||
# To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
# host_regexp: Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
|
||||
# To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
# Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
# Strongly recommended that regexp is ends with $ and take all expression in ''
|
||||
# All results of DNS requests are cached till server restart.
|
||||
|
||||
networks:
|
||||
ip: '::/0'
|
||||
|
||||
# Settings profile for user.
|
||||
profile: default
|
||||
|
||||
# Quota for user.
|
||||
quota: default
|
||||
|
||||
# User can create other users and grant rights to them.
|
||||
# access_management: 1
|
||||
|
||||
# Quotas.
|
||||
quotas:
|
||||
# Name of quota.
|
||||
default:
|
||||
# Limits for time interval. You could specify many intervals with different limits.
|
||||
interval:
|
||||
# Length of interval.
|
||||
duration: 3600
|
||||
|
||||
# No limits. Just calculate resource usage for time interval.
|
||||
queries: 0
|
||||
errors: 0
|
||||
result_rows: 0
|
||||
read_rows: 0
|
||||
execution_time: 0
|
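A Python equivalent of the shell one-liner quoted in the comments above, for generating a password and its SHA256 (illustrative only; the original uses base64/sha256sum from coreutils):

```python
import hashlib
import secrets

password = secrets.token_urlsafe(8)
print(password)                                        # plaintext password
print(hashlib.sha256(password.encode()).hexdigest())   # value for password_sha256_hex
```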
@@ -25,7 +25,7 @@ TEST(ThreadPool, GlobalFull1)
     std::atomic<size_t> counter = 0;
     static constexpr size_t num_jobs = capacity + 1;

-    auto func = [&] { ++counter; while (counter != num_jobs) {} };
+    auto func = [&] { ++counter; while (counter != num_jobs) {} }; //-V776

     ThreadPool pool(num_jobs);

@@ -63,7 +63,7 @@ TEST(ThreadPool, GlobalFull2)
     global_pool.wait();

     std::atomic<size_t> counter = 0;
-    auto func = [&] { ++counter; while (counter != capacity + 1) {} };
+    auto func = [&] { ++counter; while (counter != capacity + 1) {} }; //-V776

     ThreadPool pool(capacity, 0, capacity);
     for (size_t i = 0; i < capacity; ++i)
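The trailing `//-V776` comments added here (like the `//-V560`, `//-V522`, `//-V1007`, and `//-V1047` comments in later hunks) are PVS-Studio suppression markers: each silences one specific static-analysis diagnostic on its line. V776, for instance, flags a loop whose exit variable appears never to change; here that is a false positive, since the atomic counter is incremented by the other jobs running in the pool.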
@@ -33,6 +33,7 @@ SRCS(
     Config/AbstractConfigurationComparison.cpp
     Config/ConfigProcessor.cpp
     Config/ConfigReloader.cpp
+    Config/YAMLParser.cpp
     Config/configReadClient.cpp
     CurrentMemoryTracker.cpp
     CurrentMetrics.cpp
@@ -540,7 +540,7 @@ Block ActionsDAG::updateHeader(Block header) const

     struct Frame
     {
-        const Node * node;
+        const Node * node = nullptr;
         size_t next_child = 0;
     };

@@ -587,8 +587,7 @@ Block ActionsDAG::updateHeader(Block header) const
             }
         }

-        auto & column = node_to_column[output];
-        if (column.column)
+        if (node_to_column[output].column)
             result_columns.push_back(node_to_column[output]);
     }
 }
@@ -263,7 +263,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
         }

         /// We use global context here, because storages lifetime is bigger than query context lifetime
-        database->loadStoredObjects(getContext()->getGlobalContext(), has_force_restore_data_flag, create.attach && force_attach);
+        database->loadStoredObjects(getContext()->getGlobalContext(), has_force_restore_data_flag, create.attach && force_attach); //-V560
     }
     catch (...)
     {
@@ -26,7 +26,7 @@ void ParserErrorListener::syntaxError(
     auto * parser = dynamic_cast<ClickHouseParser *>(recognizer);
     assert(parser);

-    LOG_ERROR(&Poco::Logger::get("ClickHouseParser"),
+    LOG_ERROR(&Poco::Logger::get("ClickHouseParser"), //-V522
         "Last element parsed so far:\n"
         "{}\n"
         "Parser error: (pos {}) {}", parser->getRuleContext()->toStringTree(parser, true), token->getStartIndex(), message);
@@ -578,6 +578,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
     MergeTreeDataSelectSamplingData sampling = use_cache ? std::move(cache->sampling) : MergeTreeDataSelectSamplingData{};
     if (!use_cache)
     {
+        assert(key_condition.has_value());
+
         RelativeSize relative_sample_size = 0;
         RelativeSize relative_sample_offset = 0;

@@ -606,7 +608,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
         /// read) into the relative `SAMPLE 0.1` (how much data to read).
         size_t approx_total_rows = 0;
         if (relative_sample_size > 1 || relative_sample_offset > 1)
-            approx_total_rows = getApproximateTotalRowsToRead(parts, metadata_snapshot, *key_condition, settings);
+            approx_total_rows = getApproximateTotalRowsToRead(parts, metadata_snapshot, *key_condition, settings); //-V1007

         if (relative_sample_size > 1)
         {

@@ -765,7 +767,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(

             if (has_lower_limit)
             {
-                if (!key_condition->addCondition(sampling_key.column_names[0], Range::createLeftBounded(lower, true)))
+                if (!key_condition->addCondition(sampling_key.column_names[0], Range::createLeftBounded(lower, true))) //-V1007
                     throw Exception("Sampling column not in primary key", ErrorCodes::ILLEGAL_COLUMN);

                 ASTPtr args = std::make_shared<ASTExpressionList>();

@@ -782,7 +784,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(

             if (has_upper_limit)
             {
-                if (!key_condition->addCondition(sampling_key.column_names[0], Range::createRightBounded(upper, false)))
+                if (!key_condition->addCondition(sampling_key.column_names[0], Range::createRightBounded(upper, false))) //-V1007
                     throw Exception("Sampling column not in primary key", ErrorCodes::ILLEGAL_COLUMN);

                 ASTPtr args = std::make_shared<ASTExpressionList>();
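The new `assert(key_condition.has_value())` documents the invariant these suppressions rely on: when the cache is not used, `key_condition` is known to be engaged, so the `*key_condition` dereference and the `key_condition->addCondition(...)` calls marked `//-V1007` cannot hit an empty optional, whatever the analyzer concludes.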
@@ -15,7 +15,7 @@ class KeyCondition;

 struct MergeTreeDataSelectSamplingData
 {
-    bool use_sampling;
+    bool use_sampling = false;
     std::shared_ptr<ASTFunction> filter_function;
     ActionsDAGPtr filter_expression;
 };
@@ -173,7 +173,7 @@ void MergeTreeReaderWide::readData(
 {
     auto get_stream_getter = [&](bool stream_for_prefix) -> ISerialization::InputStreamGetter
     {
-        return [&, stream_for_prefix](const ISerialization::SubstreamPath & substream_path) -> ReadBuffer *
+        return [&, stream_for_prefix](const ISerialization::SubstreamPath & substream_path) -> ReadBuffer * //-V1047
         {
             /// If substream have already been read.
             if (cache.count(ISerialization::getSubcolumnNameForStream(substream_path)))
@@ -122,7 +122,7 @@ struct MergeTreeDataSelectCache;
 // The projection selected to execute current query
 struct ProjectionCandidate
 {
-    const ProjectionDescription * desc;
+    const ProjectionDescription * desc{};
     PrewhereInfoPtr prewhere_info;
     ActionsDAGPtr before_where;
     String where_column_name;
@@ -172,7 +172,7 @@ void LogSource::readData(const NameAndTypePair & name_and_type, ColumnPtr & colu

     auto create_stream_getter = [&](bool stream_for_prefix)
     {
-        return [&, stream_for_prefix] (const ISerialization::SubstreamPath & path) -> ReadBuffer *
+        return [&, stream_for_prefix] (const ISerialization::SubstreamPath & path) -> ReadBuffer * //-V1047
         {
             if (cache.count(ISerialization::getSubcolumnNameForStream(path)))
                 return nullptr;
@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.3">
     <preconditions>
         <table_exists>hits_100m_single</table_exists>
     </preconditions>
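A note on the attribute being toggled in this and the following test files: `max_ignored_relative_change` appears to set the per-test threshold of relative run-time change that the comparison report treats as noise. Adding or raising it relaxes the stability requirement for a flaky test, while removing it restores the stricter framework default.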
@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test max_ignored_relative_change="0.7">
     <settings>
         <max_memory_usage>30000000000</max_memory_usage>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.4">
+<test>
     <settings>
         <allow_experimental_bigint_types>1</allow_experimental_bigint_types>
         <max_threads>1</max_threads>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <query>SELECT boundingRatio(number, number) FROM numbers(100000000)</query>
     <query>SELECT (argMax(number, number) - argMin(number, number)) / (max(number) - min(number)) FROM numbers(100000000)</query>
 </test>

@@ -1,5 +1,4 @@
-<!-- FIXME this instability is abysmal, investigate the unstable queries -->
-<test max_ignored_relative_change="0.2">
+<test>
     <settings>
         <allow_suspicious_codecs>1</allow_suspicious_codecs>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <settings>
         <allow_suspicious_codecs>1</allow_suspicious_codecs>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <preconditions>
         <table_exists>hits_100m_single</table_exists>
     </preconditions>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
     <substitutions>
         <substitution>
             <name>datetime_transform</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.5">
+<test max_ignored_relative_change="0.2">
     <settings>
         <max_memory_usage>35G</max_memory_usage>
     </settings>

@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
     <settings>
         <max_memory_usage>15G</max_memory_usage>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.4">
     <create_query>
         CREATE TABLE simple_key_direct_dictionary_source_table
         (

@@ -1,7 +1,7 @@
 <test>
     <preconditions>
-        <table_exists>test.hits</table_exists>
+        <table_exists>hits_100m_single</table_exists>
     </preconditions>

-    <query>SELECT count() FROM test.hits WHERE NOT ignore(encodeXMLComponent(URL))</query>
+    <query>SELECT count() FROM hits_100m_single WHERE NOT ignore(encodeXMLComponent(URL))</query>
 </test>
@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
     <create_query>
         CREATE TABLE simple_key_flat_dictionary_source_table
         (

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <substitutions>
         <substitution>
             <name>expr</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>



@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
     <substitutions>
         <substitution>
             <name>gp_hash_func</name>

@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.6">
     <substitutions>
         <substitution>
             <name>hash_func</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8') LIMIT 1000000000);</query>
     <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8', 0, 10, 10) LIMIT 1000000000);</query>
     <query>SELECT sum(NOT ignore(*)) FROM (SELECT * FROM generateRandom('i Enum8(\'hello\' = 1, \'world\' = 5)', 0, 10, 10) LIMIT 1000000000);</query>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
     <create_query>
         CREATE TABLE simple_key_hashed_dictionary_source_table
         (

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <preconditions>
         <table_exists>hits_100m_single</table_exists>
         <table_exists>hits_10m_single</table_exists>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <query>
         WITH
             bitXor(number, 0x4CF2D2BAAE6DA887) AS x0,

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>



@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
     <substitutions>
         <substitution>
             <name>json</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.6">
     <substitutions>
         <substitution>
             <name>func_slow</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>

     <create_query>CREATE TABLE bad_partitions (a UInt64, b UInt64, c UInt64, d UInt64, e UInt64, f UInt64, g UInt64, h UInt64, i UInt64, j UInt64, k UInt64, l UInt64, m UInt64, n UInt64, o UInt64, p UInt64, q UInt64, r UInt64, s UInt64, t UInt64, u UInt64, v UInt64, w UInt64, x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x</create_query>
     <fill_query>INSERT INTO bad_partitions (x) SELECT * FROM numbers_mt(3000)</fill_query>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test max_ignored_relative_change="0.3">
     <substitutions>
         <substitution>
             <name>format</name>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
     <settings>
         <do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <settings>
         <parallel_view_processing>1</parallel_view_processing>
     </settings>
@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
     <settings>
         <!--
            Not sure why it's needed. Maybe it has something to do with the

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <create_query>
         CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
         PARTITION BY toYYYYMM(EventDate)

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <create_query>
         CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
         PARTITION BY toYYYYMM(EventDate)

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <create_query>
         CREATE TABLE hits_wide AS hits_10m_single ENGINE = MergeTree()
         PARTITION BY toYYYYMM(EventDate)

@@ -1,4 +1,4 @@
-<test>
+<test max_ignored_relative_change="0.2">
     <settings>
         <max_threads>4</max_threads>
         <max_memory_usage>20G</max_memory_usage>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <query>SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(10))</query>
     <query>SELECT count() FROM zeros(100000000) WHERE NOT ignore(randomString(100))</query>
     <query>SELECT count() FROM zeros(1000000) WHERE NOT ignore(randomString(1000))</query>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test>
     <preconditions>
         <table_exists>hits_100m_single</table_exists>
     </preconditions>

@@ -1,4 +1,5 @@
 <test>
-    <query>SELECT sumIf(1, 0) FROM numbers(100000000)</query>
-    <query>SELECT sumIf(1, 1) FROM numbers(100000000)</query>
+    <!-- Shouldn't have been a perf test, but an EXPLAIN one. -->
+    <query>SELECT sumIf(1, 0) FROM numbers(1000000000)</query>
+    <query>SELECT sumIf(1, 1) FROM numbers(1000000000)</query>
 </test>
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
     <settings>
         <max_threads>1</max_threads>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
     <settings>
         <max_memory_usage>30000000000</max_memory_usage>
     </settings>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.2">
+<test>
     <preconditions>
         <table_exists>hits_100m_single</table_exists>
         <table_exists>hits_10m_single</table_exists>

@@ -1,4 +1,4 @@
-<test max_ignored_relative_change="0.3">
+<test max_ignored_relative_change="0.2">
     <substitutions>
         <substitution>
             <name>param</name>