Merge branch 'master' of github.com:ClickHouse/ClickHouse into random-settings

This commit is contained in:
avogar 2022-03-16 14:38:54 +00:00
commit 36a2d0b289
695 changed files with 6766 additions and 3091 deletions

View File

@ -46,9 +46,9 @@ struct StringRef
constexpr StringRef(const char * data_, size_t size_) : data(data_), size(size_) {}
StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
StringRef(const std::string & s) : data(s.data()), size(s.size()) {} /// NOLINT
constexpr explicit StringRef(std::string_view s) : data(s.data()), size(s.size()) {}
constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {} /// NOLINT
constexpr StringRef() = default;
std::string toString() const { return std::string(data, size); }

View File

@ -60,5 +60,5 @@ clientPort=2181 \n\
maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
RUN mkdir /zookeeper && chmod -R 777 /zookeeper
ENV TZ=Europe/Moscow
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

View File

@ -40,7 +40,7 @@ RUN apt-get update \
/tmp/* \
&& apt-get clean
ENV TZ=Europe/Moscow
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV DOCKER_CHANNEL stable

View File

@ -64,7 +64,7 @@ The supported formats are:
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | |
| [LineAsString](#lineasstring) | ✔ | |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |

View File

@ -41,7 +41,7 @@ Example of configuration:
</clickhouse>
```
### An example of using named connections with the s3 function
### Example of using named connections with the s3 function
```sql
INSERT INTO FUNCTION s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz',
@ -57,7 +57,7 @@ FROM s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/t
1 rows in set. Elapsed: 0.279 sec. Processed 10.00 thousand rows, 90.00 KB (35.78 thousand rows/s., 322.02 KB/s.)
```
### An example of using named connections with an S3 table
### Example of using named connections with an S3 table
```sql
CREATE TABLE s3_engine_table (number Int64)
@ -72,7 +72,7 @@ SELECT * FROM s3_engine_table LIMIT 3;
└────────┘
```
## Named connections for accessing MySQL database.
## Named connections for accessing a MySQL database
For a description of the parameters, see [mysql](../sql-reference/table-functions/mysql.md).
@ -94,7 +94,7 @@ Example of configuration:
</clickhouse>
```
### An example of using named connections with the mysql function
### Example of using named connections with the mysql function
```sql
SELECT count() FROM mysql(mymysql, table = 'test');
@ -104,7 +104,7 @@ SELECT count() FROM mysql(mymysql, table = 'test');
└─────────┘
```
### An example of using named connections with an MySQL table
### Example of using named connections with a MySQL table
```sql
CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0);
@ -115,7 +115,20 @@ SELECT count() FROM mytable;
└─────────┘
```
### An example of using named with an external dictionary with source MySQL
### Example of using named connections with a database with the MySQL engine
```sql
CREATE DATABASE mydatabase ENGINE = MySQL(mymysql);
SHOW TABLES FROM mydatabase;
┌─name───┐
│ source │
│ test │
└────────┘
```
### Example of using named connections with an external dictionary with a MySQL source
```sql
CREATE DICTIONARY dict (A Int64, B String)
@ -130,3 +143,87 @@ SELECT dictGet('dict', 'B', 2);
│ two │
└─────────────────────────┘
```
## Named connections for accessing a PostgreSQL database
For a description of the parameters, see [postgresql](../sql-reference/table-functions/postgresql.md).
Example of configuration:
```xml
<clickhouse>
<named_collections>
<mypg>
<user>pguser</user>
<password>jw8s0F4</password>
<host>127.0.0.1</host>
<port>5432</port>
<database>test</database>
<schema>test_schema</schema>
<connection_pool_size>8</connection_pool_size>
</mypg>
</named_collections>
</clickhouse>
```
### Example of using named connections with the postgresql function
```sql
SELECT * FROM postgresql(mypg, table = 'test');
┌─a─┬─b───┐
│ 2 │ two │
│ 1 │ one │
└───┴─────┘
SELECT * FROM postgresql(mypg, table = 'test', schema = 'public');
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a table with the PostgreSQL engine
```sql
CREATE TABLE mypgtable (a Int64) ENGINE = PostgreSQL(mypg, table = 'test', schema = 'public');
SELECT * FROM mypgtable;
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a database with the PostgreSQL engine
```sql
CREATE DATABASE mydatabase ENGINE = PostgreSQL(mypg);
SHOW TABLES FROM mydatabase
┌─name─┐
│ test │
└──────┘
```
### Example of using named connections with an external dictionary with a POSTGRESQL source
```sql
CREATE DICTIONARY dict (a Int64, b String)
PRIMARY KEY a
SOURCE(POSTGRESQL(NAME mypg TABLE test))
LIFETIME(MIN 1 MAX 2)
LAYOUT(HASHED());
SELECT dictGet('dict', 'b', 2);
┌─dictGet('dict', 'b', 2)─┐
│ two │
└─────────────────────────┘
```

View File

@ -0,0 +1,48 @@
---
toc_priority: 69
toc_title: Statistics
---
# Functions for Working with Statistics {#functions-for-working-with-statistics}
## proportionsZTest {#proportionsztest}
Applies a proportion z-test to samples from two populations (X and Y). The alternative hypothesis is 'two-sided'.
**Syntax**
``` sql
proportionsZTest(successes_x, successes_y, trials_x, trials_y, significance_level, usevar)
```
**Arguments**
- `successes_x` — The number of successes for X in trials.
- `successes_y` — The number of successes for Y in trials.
- `trials_x` — The number of trials for X.
- `trials_y` — The number of trials for Y.
- `significance_level`
- `usevar` - Can be `'pooled'` or `'unpooled'`; the corresponding z-statistic is sketched after this list.
- `'pooled'` - The variances of the two populations are assumed to be equal.
- `'unpooled'` - The assumption of equal variances is dropped.
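For orientation only (this formula is not part of the patch), the `'pooled'`/`'unpooled'` choice corresponds to the textbook two-proportion z-statistic; a minimal sketch using the argument names above:
``` latex
\hat{p}_x = \frac{successes_x}{trials_x}, \qquad \hat{p}_y = \frac{successes_y}{trials_y}, \qquad z = \frac{\hat{p}_x - \hat{p}_y}{SE}

% 'pooled': a common proportion drives the standard error
\hat{p} = \frac{successes_x + successes_y}{trials_x + trials_y}, \qquad
SE_{pooled} = \sqrt{\hat{p}\,(1 - \hat{p})\left(\tfrac{1}{trials_x} + \tfrac{1}{trials_y}\right)}

% 'unpooled': each sample keeps its own variance estimate
SE_{unpooled} = \sqrt{\tfrac{\hat{p}_x (1 - \hat{p}_x)}{trials_x} + \tfrac{\hat{p}_y (1 - \hat{p}_y)}{trials_y}}
```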
**Returned value**
- A tuple of (z-statistic, p-value, confidence-interval-lower, confidence-interval-upper).
Type: [Tuple](../../sql-reference/data-types/tuple.md).
**Example**
Query:
``` sql
SELECT proportionsZTest(10, 11, 100, 101, 0.95, 'unpooled');
```
Result:
``` text
(-0.20656724435948853,0.8363478437079654,-0.09345975390115283,0.07563797172293502)
```

View File

@ -24,7 +24,7 @@ $ cat /etc/clickhouse-server/config.d/named_collections.xml
</clickhouse>
```
## Named connections for accessing S3.
## Named connections for accessing S3
For a description of the parameters, see [S3 Table Function](../sql-reference/table-functions/s3.md).
@ -72,7 +72,7 @@ SELECT * FROM s3_engine_table LIMIT 3;
└────────┘
```
## Example of using named connections with a MySQL database.
## Example of using named connections with a MySQL database
For a description of the parameters, see [mysql](../sql-reference/table-functions/mysql.md).
@ -104,7 +104,7 @@ SELECT count() FROM mysql(mymysql, table = 'test');
└─────────┘
```
### Example of using named with a table with the mysql engine
### Example of using named connections with a table with the mysql engine
```sql
CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0);
@ -115,7 +115,20 @@ SELECT count() FROM mytable;
└─────────┘
```
### Example of using named with an external dictionary with a mysql source
### Example of using named connections with a database with the MySQL engine
```sql
CREATE DATABASE mydatabase ENGINE = MySQL(mymysql);
SHOW TABLES FROM mydatabase;
┌─name───┐
│ source │
│ test │
└────────┘
```
### Example of using named connections with an external dictionary with a mysql source
```sql
CREATE DICTIONARY dict (A Int64, B String)
@ -130,3 +143,86 @@ SELECT dictGet('dict', 'B', 2);
│ two │
└─────────────────────────┘
```
## Example of using named connections with a PostgreSQL database
For a description of the parameters, see [postgresql](../sql-reference/table-functions/postgresql.md).
Example of configuration:
```xml
<clickhouse>
<named_collections>
<mypg>
<user>pguser</user>
<password>jw8s0F4</password>
<host>127.0.0.1</host>
<port>5432</port>
<database>test</database>
<schema>test_schema</schema>
<connection_pool_size>8</connection_pool_size>
</mypg>
</named_collections>
</clickhouse>
```
### Example of using named connections with the postgresql table function
```sql
SELECT * FROM postgresql(mypg, table = 'test');
┌─a─┬─b───┐
│ 2 │ two │
│ 1 │ one │
└───┴─────┘
SELECT * FROM postgresql(mypg, table = 'test', schema = 'public');
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a table with the PostgreSQL engine
```sql
CREATE TABLE mypgtable (a Int64) ENGINE = PostgreSQL(mypg, table = 'test', schema = 'public');
SELECT * FROM mypgtable;
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a database with the PostgreSQL engine
```sql
CREATE DATABASE mydatabase ENGINE = PostgreSQL(mypg);
SHOW TABLES FROM mydatabase
┌─name─┐
│ test │
└──────┘
```
### Example of using named connections with an external dictionary with a POSTGRESQL source
```sql
CREATE DICTIONARY dict (a Int64, b String)
PRIMARY KEY a
SOURCE(POSTGRESQL(NAME mypg TABLE test))
LIFETIME(MIN 1 MAX 2)
LAYOUT(HASHED());
SELECT dictGet('dict', 'b', 2);
┌─dictGet('dict', 'b', 2)─┐
│ two │
└─────────────────────────┘
```

View File

@ -1,6 +1,5 @@
set (CLICKHOUSE_CLIENT_SOURCES
Client.cpp
TestTags.cpp
)
set (CLICKHOUSE_CLIENT_LINK

View File

@ -21,7 +21,6 @@
#include <Common/Config/configReadClient.h>
#include <Core/QueryProcessingStage.h>
#include <Client/TestHint.h>
#include <Columns/ColumnString.h>
#include <Poco/Util/Application.h>
@ -43,7 +42,6 @@
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Formats/registerFormats.h>
#include "TestTags.h"
#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
@ -102,212 +100,6 @@ void Client::processError(const String & query) const
}
bool Client::executeMultiQuery(const String & all_queries_text)
{
// It makes sense not to base any control flow on this, so that it is
// the same in tests and in normal usage. The only difference is that in
// normal mode we ignore the test hints.
const bool test_mode = config().has("testmode");
if (test_mode)
{
/// disable logs if expects errors
TestHint test_hint(test_mode, all_queries_text);
if (test_hint.clientError() || test_hint.serverError())
processTextAsSingleQuery("SET send_logs_level = 'fatal'");
}
bool echo_query = echo_queries;
/// Test tags are started with "--" so they are interpreted as comments anyway.
/// But if the echo is enabled we have to remove the test tags from `all_queries_text`
/// because we don't want test tags to be echoed.
size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
/// Several queries separated by ';'.
/// INSERT data is ended by the end of line, not ';'.
/// An exception is VALUES format where we also support semicolon in
/// addition to end of line.
const char * this_query_begin = all_queries_text.data() + test_tags_length;
const char * this_query_end;
const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
String query_to_execute;
ASTPtr parsed_query;
std::optional<Exception> current_exception;
while (true)
{
auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
query_to_execute, parsed_query, all_queries_text, current_exception);
switch (stage)
{
case MultiQueryProcessingStage::QUERIES_END:
case MultiQueryProcessingStage::PARSING_FAILED:
{
return true;
}
case MultiQueryProcessingStage::CONTINUE_PARSING:
{
continue;
}
case MultiQueryProcessingStage::PARSING_EXCEPTION:
{
this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end);
// Try to find test hint for syntax error. We don't know where
// the query ends because we failed to parse it, so we consume
// the entire line.
TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin));
if (hint.serverError())
{
// Syntax errors are considered as client errors
current_exception->addMessage("\nExpected server error '{}'.", hint.serverError());
current_exception->rethrow();
}
if (hint.clientError() != current_exception->code())
{
if (hint.clientError())
current_exception->addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
current_exception->rethrow();
}
/// It's expected syntax error, skip the line
this_query_begin = this_query_end;
current_exception.reset();
continue;
}
case MultiQueryProcessingStage::EXECUTE_QUERY:
{
full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
if (query_fuzzer_runs)
{
if (!processWithFuzzing(full_query))
return false;
this_query_begin = this_query_end;
continue;
}
// Now we know for sure where the query ends.
// Look for the hint in the text of query + insert data + trailing
// comments,
// e.g. insert into t format CSV 'a' -- { serverError 123 }.
// Use the updated query boundaries we just calculated.
TestHint test_hint(test_mode, full_query);
// Echo all queries if asked; makes for a more readable reference
// file.
echo_query = test_hint.echoQueries().value_or(echo_query);
try
{
processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
}
catch (...)
{
// Surprisingly, this is a client error. A server error would
// have been reported w/o throwing (see onReceiveSeverException()).
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
// Check whether the error (or its absence) matches the test hints
// (or their absence).
bool error_matches_hint = true;
if (have_error)
{
if (test_hint.serverError())
{
if (!server_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
test_hint.serverError(), full_query);
}
else if (server_exception->code() != test_hint.serverError())
{
error_matches_hint = false;
fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
test_hint.serverError(), server_exception->code(), full_query);
}
}
if (test_hint.clientError())
{
if (!client_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
test_hint.clientError(), full_query);
}
else if (client_exception->code() != test_hint.clientError())
{
error_matches_hint = false;
fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
test_hint.clientError(), client_exception->code(), full_query);
}
}
if (!test_hint.clientError() && !test_hint.serverError())
{
// No error was expected but it still occurred. This is the
// default case w/o test hint, doesn't need additional
// diagnostics.
error_matches_hint = false;
}
}
else
{
if (test_hint.clientError())
{
fmt::print(stderr, "The query succeeded but the client error '{}' was expected (query: {}).\n",
test_hint.clientError(), full_query);
error_matches_hint = false;
}
if (test_hint.serverError())
{
fmt::print(stderr, "The query succeeded but the server error '{}' was expected (query: {}).\n",
test_hint.serverError(), full_query);
error_matches_hint = false;
}
}
// If the error is expected, force reconnect and ignore it.
if (have_error && error_matches_hint)
{
client_exception.reset();
server_exception.reset();
have_error = false;
if (!connection->checkConnected())
connect();
}
// For INSERTs with inline data: use the end of inline data as
// reported by the format parser (it is saved in sendData()).
// This allows us to handle queries like:
// insert into t values (1); select 1
// , where the inline data is delimited by semicolon and not by a
// newline.
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
{
this_query_end = insert_ast->end;
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
}
// Report error.
if (have_error)
processError(full_query);
// Stop processing queries if needed.
if (have_error && !ignore_error)
return is_interactive;
this_query_begin = this_query_end;
break;
}
}
}
}
/// Make query to get all server warnings
std::vector<String> Client::loadWarningMessages()
{
@ -496,7 +288,7 @@ void Client::connect()
if (hosts_and_ports.empty())
{
String host = config().getString("host", "localhost");
UInt16 port = static_cast<UInt16>(ConnectionParameters::getPortFromConfig(config()));
UInt16 port = ConnectionParameters::getPortFromConfig(config());
hosts_and_ports.emplace_back(HostAndPort{host, port});
}
@ -995,6 +787,7 @@ void Client::printHelpMessage(const OptionsDescription & options_description)
{
std::cout << options_description.main_description.value() << "\n";
std::cout << options_description.external_description.value() << "\n";
std::cout << options_description.hosts_and_ports_description.value() << "\n";
std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
}
@ -1015,7 +808,6 @@ void Client::addOptions(OptionsDescription & options_description)
("password", po::value<std::string>()->implicit_value("\n", ""), "password")
("ask-password", "ask-password")
("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
("testmode,T", "enable test hints in comments")
("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
("compression", po::value<bool>(), "enable or disable compression")
@ -1058,7 +850,7 @@ void Client::addOptions(OptionsDescription & options_description)
"Example of usage: '--host host1 --host host2 --port port2 --host host3 ...'"
"Each '--port port' will be attached to the last seen host that doesn't have a port yet,"
"if there is no such host, the port will be attached to the next first host or to default host.")
("port", po::value<UInt16>()->default_value(DBMS_DEFAULT_PORT), "server ports")
("port", po::value<UInt16>(), "server ports")
;
}
@ -1106,8 +898,11 @@ void Client::processOptions(const OptionsDescription & options_description,
= po::command_line_parser(hosts_and_ports_argument).options(options_description.hosts_and_ports_description.value()).run();
po::variables_map host_and_port_options;
po::store(parsed_hosts_and_ports, host_and_port_options);
hosts_and_ports.emplace_back(
HostAndPort{host_and_port_options["host"].as<std::string>(), host_and_port_options["port"].as<UInt16>()});
std::string host = host_and_port_options["host"].as<std::string>();
std::optional<UInt16> port = !host_and_port_options["port"].empty()
? std::make_optional(host_and_port_options["port"].as<UInt16>())
: std::nullopt;
hosts_and_ports.emplace_back(HostAndPort{host, port});
}
send_external_tables = true;
@ -1151,8 +946,6 @@ void Client::processOptions(const OptionsDescription & options_description,
config().setBool("ask-password", true);
if (options.count("quota_key"))
config().setString("quota_key", options["quota_key"].as<std::string>());
if (options.count("testmode"))
config().setBool("testmode", true);
if (options.count("max_client_network_bandwidth"))
max_client_network_bandwidth = options["max_client_network_bandwidth"].as<int>();
if (options.count("compression"))

View File

@ -16,20 +16,24 @@ public:
int main(const std::vector<String> & /*args*/) override;
protected:
bool executeMultiQuery(const String & all_queries_text) override;
bool processWithFuzzing(const String & full_query) override;
void connect() override;
void processError(const String & query) const override;
String getName() const override { return "client"; }
void printHelpMessage(const OptionsDescription & options_description) override;
void addOptions(OptionsDescription & options_description) override;
void processOptions(
const OptionsDescription & options_description,
const CommandLineOptions & options,
const std::vector<Arguments> & external_tables_arguments,
const std::vector<Arguments> & hosts_and_ports_arguments) override;
void processConfig() override;
private:

View File

@ -92,92 +92,6 @@ void LocalServer::processError(const String &) const
}
bool LocalServer::executeMultiQuery(const String & all_queries_text)
{
bool echo_query = echo_queries;
/// Several queries separated by ';'.
/// INSERT data is ended by the end of line, not ';'.
/// An exception is VALUES format where we also support semicolon in
/// addition to end of line.
const char * this_query_begin = all_queries_text.data();
const char * this_query_end;
const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
String query_to_execute;
ASTPtr parsed_query;
std::optional<Exception> current_exception;
while (true)
{
auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
query_to_execute, parsed_query, all_queries_text, current_exception);
switch (stage)
{
case MultiQueryProcessingStage::QUERIES_END:
case MultiQueryProcessingStage::PARSING_FAILED:
{
return true;
}
case MultiQueryProcessingStage::CONTINUE_PARSING:
{
continue;
}
case MultiQueryProcessingStage::PARSING_EXCEPTION:
{
if (current_exception)
current_exception->rethrow();
return true;
}
case MultiQueryProcessingStage::EXECUTE_QUERY:
{
full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
try
{
processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
}
catch (...)
{
if (!is_interactive && !ignore_error)
throw;
// Surprisingly, this is a client error. A server error would
// have been reported w/o throwing (see onReceiveSeverException()).
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
// For INSERTs with inline data: use the end of inline data as
// reported by the format parser (it is saved in sendData()).
// This allows us to handle queries like:
// insert into t values (1); select 1
// , where the inline data is delimited by semicolon and not by a
// newline.
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
if (insert_ast && insert_ast->data)
{
this_query_end = insert_ast->end;
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
}
// Report error.
if (have_error)
processError(full_query);
// Stop processing queries if needed.
if (have_error && !ignore_error)
return is_interactive;
this_query_begin = this_query_end;
break;
}
}
}
}
void LocalServer::initialize(Poco::Util::Application & self)
{
Poco::Util::Application::initialize(self);

View File

@ -31,17 +31,19 @@ public:
int main(const std::vector<String> & /*args*/) override;
protected:
bool executeMultiQuery(const String & all_queries_text) override;
void connect() override;
void processError(const String & query) const override;
String getName() const override { return "local"; }
void printHelpMessage(const OptionsDescription & options_description) override;
void addOptions(OptionsDescription & options_description) override;
void processOptions(const OptionsDescription & options_description, const CommandLineOptions & options,
const std::vector<Arguments> &, const std::vector<Arguments> &) override;
void processConfig() override;
private:

View File

@ -932,6 +932,8 @@ if (ThreadFuzzer::instance().isEffective())
global_context->loadOrReloadModels(*config);
global_context->loadOrReloadUserDefinedExecutableFunctions(*config);
global_context->setRemoteHostFilter(*config);
/// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default)
if (config->has("max_table_size_to_drop"))
global_context->setMaxTableSizeToDrop(config->getUInt64("max_table_size_to_drop"));

View File

@ -575,8 +575,8 @@
const formatted_rows = formatReadableRows(rows);
stats.innerText = `Elapsed: ${seconds} sec, read ${formatted_rows} rows, ${formatted_bytes}.`;
/// We can also render graphs if user performed EXPLAIN PIPELINE graph=1.
if (response.data.length > 3 && response.data[0][0] === "digraph" && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) {
/// We can also render graphs if user performed EXPLAIN PIPELINE graph=1 or EXPLAIN AST graph = 1
if (response.data.length > 3 && response.data[0][0].startsWith("digraph") && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) {
renderGraph(response);
} else {
renderTable(response);
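For context, a hedged illustration of the kind of query the changed check above detects: `EXPLAIN PIPELINE graph = 1` is an existing ClickHouse statement that emits DOT `digraph` output, and the updated comment suggests `EXPLAIN AST graph = 1` is handled the same way; the specific query below is illustrative only.
```sql
-- Emits a DOT "digraph ..." description of the query pipeline,
-- which the play UI renders as a graph instead of a table.
EXPLAIN PIPELINE graph = 1
SELECT sum(number)
FROM numbers(1000)
GROUP BY number % 10;
```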

View File

@ -15,7 +15,7 @@ class AccessRights
{
public:
AccessRights();
AccessRights(const AccessFlags & access);
explicit AccessRights(const AccessFlags & access);
~AccessRights();
AccessRights(const AccessRights & src);
AccessRights & operator =(const AccessRights & src);

View File

@ -15,17 +15,17 @@ using Strings = std::vector<String>;
class AccessFlags
{
public:
AccessFlags(AccessType type);
AccessFlags(AccessType type); /// NOLINT
/// The same as AccessFlags(AccessType::NONE).
AccessFlags() = default;
/// Constructs from a string like "SELECT".
AccessFlags(const std::string_view & keyword);
AccessFlags(const std::string_view & keyword); /// NOLINT
/// Constructs from a list of strings like "SELECT, UPDATE, INSERT".
AccessFlags(const std::vector<std::string_view> & keywords);
AccessFlags(const Strings & keywords);
AccessFlags(const std::vector<std::string_view> & keywords); /// NOLINT
AccessFlags(const Strings & keywords); /// NOLINT
AccessFlags(const AccessFlags & src) = default;
AccessFlags(AccessFlags && src) = default;
@ -109,7 +109,7 @@ private:
using Flags = std::bitset<SIZE>;
Flags flags;
AccessFlags(const Flags & flags_) : flags(flags_) {}
AccessFlags(const Flags & flags_) : flags(flags_) {} /// NOLINT
};
AccessFlags operator |(AccessType left, AccessType right);

View File

@ -26,7 +26,7 @@ struct AccessRightsElement
AccessRightsElement(AccessRightsElement &&) = default;
AccessRightsElement & operator=(AccessRightsElement &&) = default;
AccessRightsElement(AccessFlags access_flags_) : access_flags(access_flags_) {}
explicit AccessRightsElement(AccessFlags access_flags_) : access_flags(access_flags_) {}
AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_);
AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_);

View File

@ -18,7 +18,7 @@ public:
class IPSubnet
{
public:
IPSubnet() {}
IPSubnet() = default;
IPSubnet(const IPAddress & prefix_, const IPAddress & mask_) { set(prefix_, mask_); }
IPSubnet(const IPAddress & prefix_, size_t num_prefix_bits) { set(prefix_, num_prefix_bits); }
explicit IPSubnet(const IPAddress & address) { set(address); }
@ -43,9 +43,9 @@ public:
struct AnyHostTag {};
AllowedClientHosts() {}
AllowedClientHosts(AnyHostTag) { addAnyHost(); }
~AllowedClientHosts() {}
AllowedClientHosts() = default;
AllowedClientHosts(AnyHostTag) { addAnyHost(); } /// NOLINT
~AllowedClientHosts() = default;
AllowedClientHosts(const AllowedClientHosts & src) = default;
AllowedClientHosts & operator =(const AllowedClientHosts & src) = default;

View File

@ -54,7 +54,7 @@ class AuthenticationData
public:
using Digest = std::vector<uint8_t>;
AuthenticationData(AuthenticationType type_ = AuthenticationType::NO_PASSWORD) : type(type_) {}
explicit AuthenticationData(AuthenticationType type_ = AuthenticationType::NO_PASSWORD) : type(type_) {}
AuthenticationData(const AuthenticationData & src) = default;
AuthenticationData & operator =(const AuthenticationData & src) = default;
AuthenticationData(AuthenticationData && src) = default;

View File

@ -158,7 +158,7 @@ public:
private:
friend class AccessControl;
ContextAccess() {}
ContextAccess() {} /// NOLINT
ContextAccess(const AccessControl & access_control_, const Params & params_);
void initialize();

View File

@ -21,7 +21,6 @@ public:
protected:
[[noreturn]] static void throwNotReady();
protected:
bool is_ready = false;
String user_name;
};

View File

@ -60,8 +60,8 @@ public:
private:
friend class QuotaCache;
EnabledQuota(const Params & params_);
EnabledQuota() {}
explicit EnabledQuota(const Params & params_);
EnabledQuota() {} /// NOLINT
const String & getUserName() const { return params.user_name; }

View File

@ -41,7 +41,7 @@ public:
private:
friend class RoleCache;
EnabledRoles(const Params & params_);
explicit EnabledRoles(const Params & params_);
void setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> & info_, scope_guard & notifications);

View File

@ -44,7 +44,7 @@ public:
private:
friend class RowPolicyCache;
EnabledRowPolicies(const Params & params_);
explicit EnabledRowPolicies(const Params & params_);
struct MixedFiltersKey
{

View File

@ -38,7 +38,7 @@ public:
private:
friend class SettingsProfilesCache;
EnabledSettings(const Params & params_);
explicit EnabledSettings(const Params & params_);
void setInfo(const std::shared_ptr<const SettingsProfilesInfo> & info_);
const Params params;

View File

@ -51,7 +51,6 @@ private:
using LDAPCaches = std::map<String, LDAPCache>; // server name -> cache
using LDAPParams = std::map<String, LDAPClient::Params>; // server name -> params
private:
mutable std::recursive_mutex mutex;
LDAPParams ldap_client_params_blueprint;
mutable LDAPCaches ldap_caches;

View File

@ -47,7 +47,6 @@ private:
void resetHandles() noexcept;
void initHandles();
private:
const Params params;
bool is_failed = false;

View File

@ -24,8 +24,8 @@ class ExternalAuthenticators;
class IAccessStorage
{
public:
IAccessStorage(const String & storage_name_) : storage_name(storage_name_) {}
virtual ~IAccessStorage() {}
explicit IAccessStorage(const String & storage_name_) : storage_name(storage_name_) {}
virtual ~IAccessStorage() = default;
/// Returns the name of this storage.
const String & getStorageName() const { return storage_name; }

View File

@ -37,7 +37,7 @@ public:
String getLDAPServerName() const;
public: // IAccessStorage implementations.
// IAccessStorage implementations.
virtual const char * getStorageType() const override;
virtual String getStorageParamsJSON() const override;
virtual bool isReadOnly() const override { return true; }
@ -54,7 +54,6 @@ private: // IAccessStorage implementations.
virtual scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
virtual std::optional<UUID> authenticateImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators, bool throw_if_user_not_exists, bool allow_no_password, bool allow_plaintext_password) const override;
private:
void setConfiguration(AccessControl * access_control_, const Poco::Util::AbstractConfiguration & config, const String & prefix);
void processRoleChange(const UUID & id, const AccessEntityPtr & entity);

View File

@ -153,7 +153,7 @@ namespace
}
void LDAPClient::diag(const int rc, String text)
void LDAPClient::diag(int rc, String text)
{
std::scoped_lock lock(ldap_global_mutex);

View File

@ -133,12 +133,11 @@ public:
LDAPClient & operator= (LDAPClient &&) = delete;
protected:
MAYBE_NORETURN void diag(const int rc, String text = "");
MAYBE_NORETURN void diag(int rc, String text = "");
MAYBE_NORETURN bool openConnection();
void closeConnection() noexcept;
SearchResults search(const SearchParams & search_params);
protected:
const Params params;
#if USE_LDAP
LDAP * handle = nullptr;

View File

@ -15,7 +15,7 @@ class MemoryAccessStorage : public IAccessStorage
public:
static constexpr char STORAGE_TYPE[] = "memory";
MemoryAccessStorage(const String & storage_name_ = STORAGE_TYPE);
explicit MemoryAccessStorage(const String & storage_name_ = STORAGE_TYPE);
const char * getStorageType() const override { return STORAGE_TYPE; }

View File

@ -17,7 +17,7 @@ public:
using StoragePtr = std::shared_ptr<Storage>;
using ConstStoragePtr = std::shared_ptr<const Storage>;
MultipleAccessStorage(const String & storage_name_ = STORAGE_TYPE);
explicit MultipleAccessStorage(const String & storage_name_ = STORAGE_TYPE);
~MultipleAccessStorage() override;
const char * getStorageType() const override { return STORAGE_TYPE; }

View File

@ -19,7 +19,7 @@ struct RolesOrUsersSet;
class QuotaCache
{
public:
QuotaCache(const AccessControl & access_control_);
explicit QuotaCache(const AccessControl & access_control_);
~QuotaCache();
std::shared_ptr<const EnabledQuota> getEnabledQuota(

View File

@ -16,7 +16,7 @@ using RolePtr = std::shared_ptr<const Role>;
class RoleCache
{
public:
RoleCache(const AccessControl & access_control_);
explicit RoleCache(const AccessControl & access_control_);
~RoleCache();
std::shared_ptr<const EnabledRoles> getEnabledRoles(

View File

@ -26,13 +26,13 @@ struct RolesOrUsersSet
RolesOrUsersSet & operator =(RolesOrUsersSet && src) noexcept;
struct AllTag {};
RolesOrUsersSet(AllTag);
RolesOrUsersSet(AllTag); /// NOLINT
RolesOrUsersSet(const UUID & id);
RolesOrUsersSet(const std::vector<UUID> & ids_);
RolesOrUsersSet(const UUID & id); /// NOLINT
RolesOrUsersSet(const std::vector<UUID> & ids_); /// NOLINT
/// The constructor from AST requires the AccessControl if `ast.id_mode == false`.
RolesOrUsersSet(const ASTRolesOrUsersSet & ast);
RolesOrUsersSet(const ASTRolesOrUsersSet & ast); /// NOLINT
RolesOrUsersSet(const ASTRolesOrUsersSet & ast, const std::optional<UUID> & current_user_id);
RolesOrUsersSet(const ASTRolesOrUsersSet & ast, const AccessControl & access_control);
RolesOrUsersSet(const ASTRolesOrUsersSet & ast, const AccessControl & access_control, const std::optional<UUID> & current_user_id);

View File

@ -18,7 +18,7 @@ using RowPolicyPtr = std::shared_ptr<const RowPolicy>;
class RowPolicyCache
{
public:
RowPolicyCache(const AccessControl & access_control_);
explicit RowPolicyCache(const AccessControl & access_control_);
~RowPolicyCache();
std::shared_ptr<const EnabledRowPolicies> getEnabledRowPolicies(const UUID & user_id, const boost::container::flat_set<UUID> & enabled_roles);
@ -26,7 +26,7 @@ public:
private:
struct PolicyInfo
{
PolicyInfo(const RowPolicyPtr & policy_) { setPolicy(policy_); }
explicit PolicyInfo(const RowPolicyPtr & policy_) { setPolicy(policy_); }
void setPolicy(const RowPolicyPtr & policy_);
RowPolicyPtr policy;

View File

@ -51,7 +51,7 @@ class AccessControl;
class SettingsConstraints
{
public:
SettingsConstraints(const AccessControl & access_control_);
explicit SettingsConstraints(const AccessControl & access_control_);
SettingsConstraints(const SettingsConstraints & src);
SettingsConstraints & operator=(const SettingsConstraints & src);
SettingsConstraints(SettingsConstraints && src) noexcept;

View File

@ -15,7 +15,7 @@ struct SettingsConstraintsAndProfileIDs
std::vector<UUID> current_profiles;
std::vector<UUID> enabled_profiles;
SettingsConstraintsAndProfileIDs(const AccessControl & access_control_) : constraints(access_control_) {}
explicit SettingsConstraintsAndProfileIDs(const AccessControl & access_control_) : constraints(access_control_) {}
};
}

View File

@ -33,10 +33,10 @@ struct SettingsProfileElement
friend bool operator <=(const SettingsProfileElement & lhs, const SettingsProfileElement & rhs) { return !(rhs < lhs); }
friend bool operator >=(const SettingsProfileElement & lhs, const SettingsProfileElement & rhs) { return !(lhs < rhs); }
SettingsProfileElement() {}
SettingsProfileElement() = default;
/// The constructor from AST requires the AccessControl if `ast.id_mode == false`.
SettingsProfileElement(const ASTSettingsProfileElement & ast);
SettingsProfileElement(const ASTSettingsProfileElement & ast); /// NOLINT
SettingsProfileElement(const ASTSettingsProfileElement & ast, const AccessControl & access_control);
std::shared_ptr<ASTSettingsProfileElement> toAST() const;
std::shared_ptr<ASTSettingsProfileElement> toASTWithNames(const AccessControl & access_control) const;
@ -49,10 +49,10 @@ private:
class SettingsProfileElements : public std::vector<SettingsProfileElement>
{
public:
SettingsProfileElements() {}
SettingsProfileElements() = default;
/// The constructor from AST requires the AccessControl if `ast.id_mode == false`.
SettingsProfileElements(const ASTSettingsProfileElements & ast);
SettingsProfileElements(const ASTSettingsProfileElements & ast); /// NOLINT
SettingsProfileElements(const ASTSettingsProfileElements & ast, const AccessControl & access_control);
std::shared_ptr<ASTSettingsProfileElements> toAST() const;
std::shared_ptr<ASTSettingsProfileElements> toASTWithNames(const AccessControl & access_control) const;

View File

@ -18,7 +18,7 @@ struct SettingsProfilesInfo;
class SettingsProfilesCache
{
public:
SettingsProfilesCache(const AccessControl & access_control_);
explicit SettingsProfilesCache(const AccessControl & access_control_);
~SettingsProfilesCache();
void setDefaultProfileName(const String & default_profile_name);

View File

@ -29,7 +29,7 @@ struct SettingsProfilesInfo
/// Names of all the profiles in `profiles`.
std::unordered_map<UUID, String> names_of_profiles;
SettingsProfilesInfo(const AccessControl & access_control_) : constraints(access_control_), access_control(access_control_) {}
explicit SettingsProfilesInfo(const AccessControl & access_control_) : constraints(access_control_), access_control(access_control_) {}
std::shared_ptr<const SettingsConstraintsAndProfileIDs> getConstraintsAndProfileIDs(
const std::shared_ptr<const SettingsConstraintsAndProfileIDs> & previous = nullptr) const;

View File

@ -24,8 +24,9 @@ public:
using IsNoPasswordFunction = std::function<bool()>;
using IsPlaintextPasswordFunction = std::function<bool()>;
UsersConfigAccessStorage(const String & storage_name_ = STORAGE_TYPE, const CheckSettingNameFunction & check_setting_name_function_ = {}, const IsNoPasswordFunction & is_no_password_allowed_function_ ={}, const IsPlaintextPasswordFunction & is_plaintext_password_allowed_function_ = {});
UsersConfigAccessStorage(const CheckSettingNameFunction & check_setting_name_function_, const IsNoPasswordFunction & is_no_password_allowed_function_, const IsPlaintextPasswordFunction & is_plaintext_password_allowed_function_);
UsersConfigAccessStorage(const String & storage_name_ = STORAGE_TYPE, const CheckSettingNameFunction & check_setting_name_function_ = {}, const IsNoPasswordFunction & is_no_password_allowed_function_ ={}, const IsPlaintextPasswordFunction & is_plaintext_password_allowed_function_ = {}); /// NOLINT
UsersConfigAccessStorage(const CheckSettingNameFunction & check_setting_name_function_, const IsNoPasswordFunction & is_no_password_allowed_function_, const IsPlaintextPasswordFunction & is_plaintext_password_allowed_function_); /// NOLINT
~UsersConfigAccessStorage() override;
const char * getStorageType() const override { return STORAGE_TYPE; }

View File

@ -97,7 +97,7 @@ private:
/** Calculates the slope of a line between leftmost and rightmost data points.
* (y2 - y1) / (x2 - x1)
*/
Float64 NO_SANITIZE_UNDEFINED getBoundingRatio(const AggregateFunctionBoundingRatioData & data) const
static Float64 NO_SANITIZE_UNDEFINED getBoundingRatio(const AggregateFunctionBoundingRatioData & data)
{
if (data.empty)
return std::numeric_limits<Float64>::quiet_NaN();
@ -111,11 +111,11 @@ public:
return "boundingRatio";
}
AggregateFunctionBoundingRatio(const DataTypes & arguments)
explicit AggregateFunctionBoundingRatio(const DataTypes & arguments)
: IAggregateFunctionDataHelper<AggregateFunctionBoundingRatioData, AggregateFunctionBoundingRatio>(arguments, {})
{
const auto x_arg = arguments.at(0).get();
const auto y_arg = arguments.at(1).get();
const auto * x_arg = arguments.at(0).get();
const auto * y_arg = arguments.at(1).get();
if (!x_arg->isValueRepresentedByNumber() || !y_arg->isValueRepresentedByNumber())
throw Exception("Illegal types of arguments of aggregate function " + getName() + ", must have number representation.",

View File

@ -63,12 +63,12 @@ public:
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
{
auto y_col = static_cast<const ColumnUInt8 *>(columns[category_count]);
const auto * y_col = static_cast<const ColumnUInt8 *>(columns[category_count]);
bool y = y_col->getData()[row_num];
for (size_t i : collections::range(0, category_count))
{
auto x_col = static_cast<const ColumnUInt8 *>(columns[i]);
const auto * x_col = static_cast<const ColumnUInt8 *>(columns[i]);
bool x = x_col->getData()[row_num];
if (x)
@ -104,7 +104,7 @@ public:
);
}
void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override /// NOLINT
{
auto & col = static_cast<ColumnArray &>(to);
auto & data_col = static_cast<ColumnFloat64 &>(col.getData());

View File

@ -37,7 +37,7 @@ namespace ErrorCodes
class AggregateFunctionCount final : public IAggregateFunctionDataHelper<AggregateFunctionCountData, AggregateFunctionCount>
{
public:
AggregateFunctionCount(const DataTypes & argument_types_) : IAggregateFunctionDataHelper(argument_types_, {}) {}
explicit AggregateFunctionCount(const DataTypes & argument_types_) : IAggregateFunctionDataHelper(argument_types_, {}) {}
String getName() const override { return "count"; }
@ -107,7 +107,7 @@ public:
}
/// Reset the state to specified value. This function is not the part of common interface.
void set(AggregateDataPtr __restrict place, UInt64 new_count) const
static void set(AggregateDataPtr __restrict place, UInt64 new_count)
{
data(place).count = new_count;
}
@ -206,7 +206,7 @@ public:
void addBatchSinglePlace(
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena *, ssize_t if_argument_pos) const override
{
auto & nc = assert_cast<const ColumnNullable &>(*columns[0]);
const auto & nc = assert_cast<const ColumnNullable &>(*columns[0]);
if (if_argument_pos >= 0)
{
const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();

View File

@ -91,7 +91,7 @@ private:
size_t num_args;
public:
AggregateFunctionEntropy(const DataTypes & argument_types_)
explicit AggregateFunctionEntropy(const DataTypes & argument_types_)
: IAggregateFunctionDataHelper<EntropyData<Value>, AggregateFunctionEntropy<Value>>(argument_types_, {})
, num_args(argument_types_.size())
{

View File

@ -39,7 +39,7 @@ struct AggregateFunctionWithProperties
AggregateFunctionWithProperties & operator = (const AggregateFunctionWithProperties &) = default;
template <typename Creator, std::enable_if_t<!std::is_same_v<Creator, AggregateFunctionWithProperties>> * = nullptr>
AggregateFunctionWithProperties(Creator creator_, AggregateFunctionProperties properties_ = {})
AggregateFunctionWithProperties(Creator creator_, AggregateFunctionProperties properties_ = {}) /// NOLINT
: creator(std::forward<Creator>(creator_)), properties(std::move(properties_))
{
}

View File

@ -144,7 +144,7 @@ public:
}
}
void create(AggregateDataPtr __restrict place) const override
void create(AggregateDataPtr __restrict place) const override /// NOLINT
{
[[maybe_unused]] auto a = new (place) Data;
if constexpr (Trait::sampler == Sampler::RNG)
@ -447,7 +447,7 @@ public:
}
}
void create(AggregateDataPtr __restrict place) const override
void create(AggregateDataPtr __restrict place) const override /// NOLINT
{
[[maybe_unused]] auto a = new (place) Data;
if constexpr (Trait::sampler == Sampler::RNG)

View File

@ -77,11 +77,11 @@ struct MovingAvgData : public MovingData<T>
};
template <typename T, typename Tlimit_num_elems, typename Data>
template <typename T, typename LimitNumElements, typename Data>
class MovingImpl final
: public IAggregateFunctionDataHelper<Data, MovingImpl<T, Tlimit_num_elems, Data>>
: public IAggregateFunctionDataHelper<Data, MovingImpl<T, LimitNumElements, Data>>
{
static constexpr bool limit_num_elems = Tlimit_num_elems::value;
static constexpr bool limit_num_elems = LimitNumElements::value;
UInt64 window_size;
public:
@ -93,7 +93,7 @@ public:
using ColumnResult = ColumnVectorOrDecimal<ResultT>;
explicit MovingImpl(const DataTypePtr & data_type_, UInt64 window_size_ = std::numeric_limits<UInt64>::max())
: IAggregateFunctionDataHelper<Data, MovingImpl<T, Tlimit_num_elems, Data>>({data_type_}, {})
: IAggregateFunctionDataHelper<Data, MovingImpl<T, LimitNumElements, Data>>({data_type_}, {})
, window_size(window_size_) {}
String getName() const override { return Data::name; }

View File

@ -18,7 +18,7 @@ template <typename T, typename Data>
class AggregateFunctionBitmap final : public IAggregateFunctionDataHelper<Data, AggregateFunctionBitmap<T, Data>>
{
public:
AggregateFunctionBitmap(const DataTypePtr & type)
explicit AggregateFunctionBitmap(const DataTypePtr & type)
: IAggregateFunctionDataHelper<Data, AggregateFunctionBitmap<T, Data>>({type}, {})
{
}
@ -55,9 +55,9 @@ template <typename T, typename Data, typename Policy>
class AggregateFunctionBitmapL2 final : public IAggregateFunctionDataHelper<Data, AggregateFunctionBitmapL2<T, Data, Policy>>
{
private:
static constexpr auto STATE_VERSION_1_MIN_REVISION = 54455;
static constexpr size_t STATE_VERSION_1_MIN_REVISION = 54455;
public:
AggregateFunctionBitmapL2(const DataTypePtr & type)
explicit AggregateFunctionBitmapL2(const DataTypePtr & type)
: IAggregateFunctionDataHelper<Data, AggregateFunctionBitmapL2<T, Data, Policy>>({type}, {})
{
}

View File

@ -151,7 +151,7 @@ public:
/**
* Computes the intersection between two bitmaps
*/
void rb_and(const RoaringBitmapWithSmallSet & r1)
void rb_and(const RoaringBitmapWithSmallSet & r1) /// NOLINT
{
ValueBuffer buffer;
if (isSmall() && r1.isSmall())
@ -195,12 +195,12 @@ public:
/**
* Computes the union between two bitmaps.
*/
void rb_or(const RoaringBitmapWithSmallSet & r1) { merge(r1); }
void rb_or(const RoaringBitmapWithSmallSet & r1) { merge(r1); } /// NOLINT
/**
* Computes the symmetric difference (xor) between two bitmaps.
*/
void rb_xor(const RoaringBitmapWithSmallSet & r1)
void rb_xor(const RoaringBitmapWithSmallSet & r1) /// NOLINT
{
if (isSmall())
toLarge();
@ -212,7 +212,7 @@ public:
/**
* Computes the difference (andnot) between two bitmaps
*/
void rb_andnot(const RoaringBitmapWithSmallSet & r1)
void rb_andnot(const RoaringBitmapWithSmallSet & r1) /// NOLINT
{
ValueBuffer buffer;
if (isSmall() && r1.isSmall())
@ -256,7 +256,7 @@ public:
/**
* Computes the cardinality of the intersection between two bitmaps.
*/
UInt64 rb_and_cardinality(const RoaringBitmapWithSmallSet & r1) const
UInt64 rb_and_cardinality(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
UInt64 ret = 0;
if (isSmall() && r1.isSmall())
@ -283,8 +283,8 @@ public:
/**
* Computes the cardinality of the union between two bitmaps.
*/
UInt64 rb_or_cardinality(const RoaringBitmapWithSmallSet & r1) const
*/
UInt64 rb_or_cardinality(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
UInt64 c1 = size();
UInt64 c2 = r1.size();
@ -294,8 +294,8 @@ public:
/**
* Computes the cardinality of the symmetric difference (xor) between two bitmaps.
*/
UInt64 rb_xor_cardinality(const RoaringBitmapWithSmallSet & r1) const
*/
UInt64 rb_xor_cardinality(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
UInt64 c1 = size();
UInt64 c2 = r1.size();
@ -306,7 +306,7 @@ public:
/**
* Computes the cardinality of the difference (andnot) between two bitmaps.
*/
UInt64 rb_andnot_cardinality(const RoaringBitmapWithSmallSet & r1) const
UInt64 rb_andnot_cardinality(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
UInt64 c1 = size();
UInt64 inter = rb_and_cardinality(r1);
@ -316,7 +316,7 @@ public:
/**
* Return 1 if the two bitmaps contain the same elements.
*/
UInt8 rb_equals(const RoaringBitmapWithSmallSet & r1)
UInt8 rb_equals(const RoaringBitmapWithSmallSet & r1) /// NOLINT
{
if (isSmall())
toLarge();
@ -329,7 +329,7 @@ public:
* Check whether two bitmaps intersect.
* Intersection with an empty set is always 0 (consistent with hasAny).
*/
UInt8 rb_intersect(const RoaringBitmapWithSmallSet & r1) const
UInt8 rb_intersect(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
if (isSmall())
{
@ -370,7 +370,7 @@ public:
* Empty set is a subset of any other set (consistent with hasAll).
* It's used in subset and currently only support comparing same type
*/
UInt8 rb_is_subset(const RoaringBitmapWithSmallSet & r1) const
UInt8 rb_is_subset(const RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
if (isSmall())
{
@ -420,7 +420,7 @@ public:
/**
* Check whether this bitmap contains the argument.
*/
UInt8 rb_contains(UInt64 x) const
UInt8 rb_contains(UInt64 x) const /// NOLINT
{
if (!std::is_same_v<T, UInt64> && x > rb_max())
return 0;
@ -434,7 +434,7 @@ public:
/**
* Remove value
*/
void rb_remove(UInt64 x)
void rb_remove(UInt64 x) /// NOLINT
{
if (!std::is_same_v<T, UInt64> && x > rb_max())
return;
@ -451,7 +451,7 @@ public:
* range_end - range_start.
* Areas outside the range are passed through unchanged.
*/
void rb_flip(UInt64 begin, UInt64 end)
void rb_flip(UInt64 begin, UInt64 end) /// NOLINT
{
if (isSmall())
toLarge();
@ -462,7 +462,7 @@ public:
/**
* Returns the number of integers that are smaller than or equal to x.
*/
UInt64 rb_rank(UInt64 x)
UInt64 rb_rank(UInt64 x) /// NOLINT
{
if (isSmall())
toLarge();
@ -474,7 +474,7 @@ public:
* Convert elements to integer array, return number of elements
*/
template <typename Element>
UInt64 rb_to_array(PaddedPODArray<Element> & res) const
UInt64 rb_to_array(PaddedPODArray<Element> & res) const /// NOLINT
{
UInt64 count = 0;
if (isSmall())
@ -500,7 +500,7 @@ public:
* Return new set with specified range (not include the range_end)
* It's used in subset and currently only support UInt32
*/
UInt64 rb_range(UInt64 range_start, UInt64 range_end, RoaringBitmapWithSmallSet & r1) const
UInt64 rb_range(UInt64 range_start, UInt64 range_end, RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
UInt64 count = 0;
if (range_start >= range_end)
@ -540,7 +540,7 @@ public:
* Return new set of the smallest `limit` values in set which is no less than `range_start`.
* It's used in subset and currently only support UInt32
*/
UInt64 rb_limit(UInt64 range_start, UInt64 limit, RoaringBitmapWithSmallSet & r1) const
UInt64 rb_limit(UInt64 range_start, UInt64 limit, RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
if (limit == 0)
return 0;
@ -586,7 +586,7 @@ public:
}
}
UInt64 rb_offset_limit(UInt64 offset, UInt64 limit, RoaringBitmapWithSmallSet & r1) const
UInt64 rb_offset_limit(UInt64 offset, UInt64 limit, RoaringBitmapWithSmallSet & r1) const /// NOLINT
{
if (limit == 0 || offset >= size())
return 0;
@ -617,7 +617,7 @@ public:
}
}
UInt64 rb_min() const
UInt64 rb_min() const /// NOLINT
{
if (isSmall())
{
@ -636,7 +636,7 @@ public:
return rb->minimum();
}
UInt64 rb_max() const
UInt64 rb_max() const /// NOLINT
{
if (isSmall())
{
@ -659,7 +659,7 @@ public:
* Replace value.
* It's used in transform and currently can only support UInt32
*/
void rb_replace(const UInt64 * from_vals, const UInt64 * to_vals, size_t num)
void rb_replace(const UInt64 * from_vals, const UInt64 * to_vals, size_t num) /// NOLINT
{
if (isSmall())
toLarge();

View File

@ -37,11 +37,11 @@ struct AggregateFunctionGroupUniqArrayData
/// Puts all values to the hash set. Returns an array of unique values. Implemented for numeric types.
template <typename T, typename Tlimit_num_elem>
template <typename T, typename LimitNumElems>
class AggregateFunctionGroupUniqArray
: public IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayData<T>, AggregateFunctionGroupUniqArray<T, Tlimit_num_elem>>
: public IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayData<T>, AggregateFunctionGroupUniqArray<T, LimitNumElems>>
{
static constexpr bool limit_num_elems = Tlimit_num_elem::value;
static constexpr bool limit_num_elems = LimitNumElems::value;
UInt64 max_elems;
private:
@ -50,7 +50,7 @@ private:
public:
AggregateFunctionGroupUniqArray(const DataTypePtr & argument_type, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayData<T>,
AggregateFunctionGroupUniqArray<T, Tlimit_num_elem>>({argument_type}, parameters_),
AggregateFunctionGroupUniqArray<T, LimitNumElems>>({argument_type}, parameters_),
max_elems(max_elems_) {}
String getName() const override { return "groupUniqArray"; }
@ -139,21 +139,21 @@ static void deserializeAndInsertImpl(StringRef str, IColumn & data_to);
/** Template parameter with true value should be used for columns that store their elements in memory continuously.
* For such columns groupUniqArray() can be implemented more efficiently (especially for small numeric arrays).
*/
template <bool is_plain_column = false, typename Tlimit_num_elem = std::false_type>
template <bool is_plain_column = false, typename LimitNumElems = std::false_type>
class AggregateFunctionGroupUniqArrayGeneric
: public IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData,
AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>
AggregateFunctionGroupUniqArrayGeneric<is_plain_column, LimitNumElems>>
{
DataTypePtr & input_data_type;
static constexpr bool limit_num_elems = Tlimit_num_elem::value;
static constexpr bool limit_num_elems = LimitNumElems::value;
UInt64 max_elems;
using State = AggregateFunctionGroupUniqArrayGenericData;
public:
AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type_}, parameters_)
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, LimitNumElems>>({input_data_type_}, parameters_)
, input_data_type(this->argument_types[0])
, max_elems(max_elems_) {}

View File

@ -54,13 +54,12 @@ private:
Mean mean;
Weight weight;
WeightedValue operator+ (const WeightedValue & other)
WeightedValue operator+(const WeightedValue & other) const
{
return {mean + other.weight * (other.mean - mean) / (other.weight + weight), other.weight + weight};
}
};
private:
// quantity of stored weighted-values
UInt32 size;
@ -71,7 +70,6 @@ private:
// Weighted values representation of histogram.
WeightedValue points[0];
private:
void sort()
{
::sort(points, points + size,
@ -87,18 +85,18 @@ private:
size_t size = 0;
T * data_ptr;
PriorityQueueStorage(T * value)
explicit PriorityQueueStorage(T * value)
: data_ptr(value)
{
}
void push_back(T val)
void push_back(T val) /// NOLINT
{
data_ptr[size] = std::move(val);
++size;
}
void pop_back() { --size; }
void pop_back() { --size; } /// NOLINT
T * begin() { return data_ptr; }
T * end() const { return data_ptr + size; }
bool empty() const { return size == 0; }

View File

@ -333,7 +333,7 @@ public:
return std::make_shared<DataTypeNumber<Float64>>();
}
void create(AggregateDataPtr __restrict place) const override
void create(AggregateDataPtr __restrict place) const override /// NOLINT
{
std::shared_ptr<IWeightsUpdater> new_weights_updater;
if (weights_updater_name == "SGD")

View File

@ -149,7 +149,7 @@ public:
if (params[0].getType() != Field::Types::String)
throw Exception("Aggregate function " + getName() + " require first parameter to be a String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
auto param = params[0].get<String>();
const auto & param = params[0].get<String>();
if (param == "two-sided")
alternative = Alternative::TwoSided;
else if (param == "less")

View File

@ -1130,7 +1130,7 @@ private:
SerializationPtr serialization;
public:
AggregateFunctionsSingleValue(const DataTypePtr & type)
explicit AggregateFunctionsSingleValue(const DataTypePtr & type)
: IAggregateFunctionDataHelper<Data, AggregateFunctionsSingleValue<Data>>({type}, {})
, serialization(type->getDefaultSerialization())
{
@ -1188,7 +1188,7 @@ public:
}
}
void addBatchSinglePlaceNotNull(
void addBatchSinglePlaceNotNull( /// NOLINT
size_t batch_size,
AggregateDataPtr place,
const IColumn ** columns,

View File

@ -77,7 +77,7 @@ protected:
static bool getFlag(ConstAggregateDataPtr __restrict place) noexcept
{
return result_is_nullable ? place[0] : 1;
return result_is_nullable ? place[0] : true;
}
public:
@ -148,7 +148,7 @@ public:
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
{
bool flag = 1;
bool flag = true;
if constexpr (serialize_flag)
readBinary(flag, buf);
if (flag)
@ -306,7 +306,7 @@ public:
}
}
void addBatchSinglePlace(
void addBatchSinglePlace( /// NOLINT
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
{
const ColumnNullable * column = assert_cast<const ColumnNullable *>(columns[0]);

View File

@ -108,7 +108,7 @@ public:
place[size_of_data] = 1;
}
void addBatch(
void addBatch( /// NOLINT
size_t batch_size,
AggregateDataPtr * places,
size_t place_offset,
@ -134,7 +134,7 @@ public:
}
}
void addBatchSinglePlace(
void addBatchSinglePlace( /// NOLINT
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
{
if (if_argument_pos >= 0)
@ -160,7 +160,7 @@ public:
}
}
void addBatchSinglePlaceNotNull(
void addBatchSinglePlaceNotNull( /// NOLINT
size_t batch_size,
AggregateDataPtr place,
const IColumn ** columns,

View File

@ -77,7 +77,7 @@ public:
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
auto & a = this->data(place);
auto & b = this->data(rhs);
const auto & b = this->data(rhs);
a.merge(b, arena);
}

View File

@ -21,7 +21,7 @@ class AggregateFunctionResample final : public IAggregateFunctionHelper<Aggregat
{
private:
/// Sanity threshold to avoid creation of too large arrays. The choice of this number is arbitrary.
const size_t MAX_ELEMENTS = 1048576;
static constexpr size_t max_elements = 1048576;
AggregateFunctionPtr nested_function;
@ -75,7 +75,7 @@ public:
total = (sum - 1) / step; // total = (end - begin + step - 1) / step
}
if (total > MAX_ELEMENTS)
if (total > max_elements)
throw Exception("The range given in function "
+ getName() + " contains too many elements",
ErrorCodes::ARGUMENT_OUT_OF_BOUND);
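
The bucket count above, total = (end - begin + step - 1) / step, is the integer ceiling of (end - begin) / step, and anything above the sanity threshold is rejected. A small hedged sketch of that arithmetic with illustrative names:

```cpp
#include <cassert>
#include <cstddef>
#include <stdexcept>

// Hedged sketch of the resample bucket-count check above: the number of buckets is
// the integer ceiling of (end - begin) / step, rejected beyond a sanity threshold
// (1048576 in the code above).
static constexpr size_t max_elements_sketch = 1048576;

size_t resampleBucketCount(size_t begin, size_t end, size_t step)
{
    size_t total = (end - begin + step - 1) / step; // ceil((end - begin) / step)
    if (total > max_elements_sketch)
        throw std::invalid_argument("The range contains too many elements");
    return total;
}

int main()
{
    assert(resampleBucketCount(0, 10, 3) == 4); // buckets start at 0, 3, 6, 9
    assert(resampleBucketCount(0, 9, 3) == 3);
}
```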

View File

@ -75,12 +75,12 @@ public:
return "retention";
}
AggregateFunctionRetention(const DataTypes & arguments)
explicit AggregateFunctionRetention(const DataTypes & arguments)
: IAggregateFunctionDataHelper<AggregateFunctionRetentionData, AggregateFunctionRetention>(arguments, {})
{
for (const auto i : collections::range(0, arguments.size()))
{
auto cond_arg = arguments[i].get();
const auto * cond_arg = arguments[i].get();
if (!isUInt8(cond_arg))
throw Exception{"Illegal type " + cond_arg->getName() + " of argument " + toString(i) + " of aggregate function "
+ getName() + ", must be UInt8",

View File

@ -37,7 +37,7 @@ struct ComparePairFirst final
}
};
static constexpr auto max_events = 32;
static constexpr size_t max_events = 32;
template <typename T>
struct AggregateFunctionSequenceMatchData final
@ -187,7 +187,7 @@ private:
std::uint64_t extra;
PatternAction() = default;
PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {}
explicit PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {}
};
using PatternActions = PODArrayWithStackMemory<PatternAction, 64>;
@ -246,7 +246,7 @@ private:
throw_exception("Unknown time condition");
UInt64 duration = 0;
auto prev_pos = pos;
const auto * prev_pos = pos;
pos = tryReadIntText(duration, pos, end);
if (pos == prev_pos)
throw_exception("Could not parse number");
@ -262,7 +262,7 @@ private:
else
{
UInt64 event_number = 0;
auto prev_pos = pos;
const auto * prev_pos = pos;
pos = tryReadIntText(event_number, pos, end);
if (pos == prev_pos)
throw_exception("Could not parse number");
@ -580,7 +580,7 @@ private:
struct DFAState
{
DFAState(bool has_kleene_ = false)
explicit DFAState(bool has_kleene_ = false)
: has_kleene{has_kleene_}, event{0}, transition{DFATransition::None}
{}

View File

@ -216,7 +216,7 @@ public:
a.value.push_back(v->clone(arena), arena);
}
void create(AggregateDataPtr place) const override
void create(AggregateDataPtr place) const override /// NOLINT
{
new (place) Data;
}

View File

@ -185,18 +185,18 @@ private:
std::optional<Float64> max_y;
std::optional<Float64> new_y;
std::vector<std::optional<Float64>> newPoints;
newPoints.reserve(width);
std::vector<std::optional<Float64>> new_points;
new_points.reserve(width);
std::pair<size_t, Float64> bound{0, 0.0};
size_t cur_bucket_num = 0;
// upper bound for bucket
auto upperBound = [&](size_t bucket_num)
auto upper_bound = [&](size_t bucket_num)
{
bound.second = (bucket_num + 1) * multiple_d;
bound.first = std::floor(bound.second);
};
upperBound(cur_bucket_num);
upper_bound(cur_bucket_num);
for (size_t i = 0; i <= (diff_x + 1); ++i)
{
if (i == bound.first) // is bound
@ -211,7 +211,7 @@ private:
{
Float64 avg_y = new_y.value() / multiple_d;
newPoints.emplace_back(avg_y);
new_points.emplace_back(avg_y);
// If min_y has no value, or if the avg_y of the current bucket is less than min_y, update it.
if (!min_y || avg_y < min_y)
min_y = avg_y;
@ -220,12 +220,12 @@ private:
}
else
{
newPoints.emplace_back();
new_points.emplace_back();
}
// next bucket
new_y = found ? ((1 - proportion) * it->getMapped()) : std::optional<Float64>();
upperBound(++cur_bucket_num);
upper_bound(++cur_bucket_num);
}
else
{
@ -240,19 +240,19 @@ private:
Float64 diff_y = max_y.value() - min_y.value();
auto getBars = [&] (const std::optional<Float64> & point_y)
auto get_bars = [&] (const std::optional<Float64> & point_y)
{
value += getBar(point_y ? std::round(((point_y.value() - min_y.value()) / diff_y) * 7) + 1 : 0);
};
auto getBarsForConstant = [&] (const std::optional<Float64> & point_y)
auto get_bars_for_constant = [&] (const std::optional<Float64> & point_y)
{
value += getBar(point_y ? 1 : 0);
};
if (diff_y)
std::for_each(newPoints.begin(), newPoints.end(), getBars);
std::for_each(new_points.begin(), new_points.end(), get_bars);
else
std::for_each(newPoints.begin(), newPoints.end(), getBarsForConstant);
std::for_each(new_points.begin(), new_points.end(), get_bars_for_constant);
}
return value;
}
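
The rendering loop above maps each bucket's average to one of eight bar levels with round(((y - min_y) / (max_y - min_y)) * 7) + 1, keeping 0 for empty buckets and falling back to the lowest level when all values are equal. A standalone sketch of that mapping; the block glyphs below are an assumption about what getBar produces, not taken from the source:

```cpp
#include <cmath>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Sketch of the level computation used by get_bars above. Level 0 is an empty
// bucket, levels 1..8 index increasingly tall bars. The glyphs are assumed.
size_t barLevel(std::optional<double> y, double min_y, double max_y)
{
    if (!y)
        return 0;
    double diff_y = max_y - min_y;
    if (diff_y == 0)
        return 1; // the "constant" branch: any present point gets the lowest bar
    return static_cast<size_t>(std::round(((*y - min_y) / diff_y) * 7)) + 1;
}

int main()
{
    const std::vector<std::string> bars = {" ", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"};
    std::vector<std::optional<double>> points = {1.0, 3.0, std::nullopt, 8.0};
    for (const auto & p : points)
        std::cout << bars[barLevel(p, 1.0, 8.0)];
    std::cout << '\n'; // a low bar, a mid bar, a blank, the tallest bar
}
```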

View File

@ -114,7 +114,7 @@ class AggregateFunctionVariance final
: public IAggregateFunctionDataHelper<AggregateFunctionVarianceData<T, Op>, AggregateFunctionVariance<T, Op>>
{
public:
AggregateFunctionVariance(const DataTypePtr & arg)
explicit AggregateFunctionVariance(const DataTypePtr & arg)
: IAggregateFunctionDataHelper<AggregateFunctionVarianceData<T, Op>, AggregateFunctionVariance<T, Op>>({arg}, {}) {}
String getName() const override { return Op::name; }
@ -249,7 +249,6 @@ protected:
readBinary(right_m2, buf);
}
protected:
Float64 left_m2 = 0.0;
Float64 right_m2 = 0.0;
};
@ -367,7 +366,7 @@ class AggregateFunctionCovariance final
AggregateFunctionCovariance<T, U, Op, compute_marginal_moments>>
{
public:
AggregateFunctionCovariance(const DataTypes & args) : IAggregateFunctionDataHelper<
explicit AggregateFunctionCovariance(const DataTypes & args) : IAggregateFunctionDataHelper<
CovarianceData<T, U, Op, compute_marginal_moments>,
AggregateFunctionCovariance<T, U, Op, compute_marginal_moments>>(args, {}) {}

View File

@ -80,7 +80,7 @@ public:
using ResultType = typename StatFunc::ResultType;
using ColVecResult = ColumnVector<ResultType>;
AggregateFunctionVarianceSimple(const DataTypes & argument_types_)
explicit AggregateFunctionVarianceSimple(const DataTypes & argument_types_)
: IAggregateFunctionDataHelper<typename StatFunc::Data, AggregateFunctionVarianceSimple<StatFunc>>(argument_types_, {})
, src_scale(0)
{}

View File

@ -97,7 +97,7 @@ struct AggregateFunctionSumData
template <typename Value, bool add_if_zero>
void NO_SANITIZE_UNDEFINED NO_INLINE
addManyConditional_internal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t count)
addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t count)
{
const auto * end = ptr + count;
@ -124,7 +124,8 @@ struct AggregateFunctionSumData
/// For floating point we use a similar trick as above, except that now we reinterpret the floating point number as an unsigned
/// integer of the same size and use a mask instead (0 to discard, 0xFF..FF to keep)
static_assert(sizeof(Value) == 4 || sizeof(Value) == 8);
typedef typename std::conditional_t<sizeof(Value) == 4, UInt32, UInt64> equivalent_integer;
using equivalent_integer = typename std::conditional_t<sizeof(Value) == 4, UInt32, UInt64>;
constexpr size_t unroll_count = 128 / sizeof(T);
T partial_sums[unroll_count]{};
@ -163,13 +164,13 @@ struct AggregateFunctionSumData
template <typename Value>
void ALWAYS_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count)
{
return addManyConditional_internal<Value, true>(ptr, null_map, count);
return addManyConditionalInternal<Value, true>(ptr, null_map, count);
}
template <typename Value>
void ALWAYS_INLINE addManyConditional(const Value * __restrict ptr, const UInt8 * __restrict cond_map, size_t count)
{
return addManyConditional_internal<Value, false>(ptr, cond_map, count);
return addManyConditionalInternal<Value, false>(ptr, cond_map, count);
}
void NO_SANITIZE_UNDEFINED merge(const AggregateFunctionSumData & rhs)
@ -248,7 +249,7 @@ struct AggregateFunctionSumKahanData
}
template <typename Value, bool add_if_zero>
void NO_INLINE addManyConditional_internal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t count)
void NO_INLINE addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t count)
{
constexpr size_t unroll_count = 4;
T partial_sums[unroll_count]{};
@ -281,13 +282,13 @@ struct AggregateFunctionSumKahanData
template <typename Value>
void ALWAYS_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count)
{
return addManyConditional_internal<Value, true>(ptr, null_map, count);
return addManyConditionalInternal<Value, true>(ptr, null_map, count);
}
template <typename Value>
void ALWAYS_INLINE addManyConditional(const Value * __restrict ptr, const UInt8 * __restrict cond_map, size_t count)
{
return addManyConditional_internal<Value, false>(ptr, cond_map, count);
return addManyConditionalInternal<Value, false>(ptr, cond_map, count);
}
void ALWAYS_INLINE mergeImpl(T & to_sum, T & to_compensation, T from_sum, T from_compensation)
@ -351,7 +352,7 @@ public:
__builtin_unreachable();
}
AggregateFunctionSum(const DataTypes & argument_types_)
explicit AggregateFunctionSum(const DataTypes & argument_types_)
: IAggregateFunctionDataHelper<Data, AggregateFunctionSum<T, TResult, Data, Type>>(argument_types_, {})
, scale(0)
{}
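
The comment in addManyConditionalInternal above describes summing only selected floating-point values without a branch: each value is reinterpreted as a same-sized unsigned integer and ANDed with a mask that is all ones to keep it or all zeros to turn it into +0.0. A minimal sketch of that trick, not the ClickHouse implementation:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Branch-free conditional sum: instead of `if (keep) sum += v`, reinterpret the
// float as an unsigned integer, AND it with 0xFF..FF (keep) or 0x00..00 (discard),
// and reinterpret back. Mirrors the idea described above; the naming here is ours.
template <typename Value>
Value maskedSum(const Value * values, const uint8_t * keep, size_t count)
{
    static_assert(sizeof(Value) == 4 || sizeof(Value) == 8);
    using EquivalentInteger = std::conditional_t<sizeof(Value) == 4, uint32_t, uint64_t>;

    Value sum{};
    for (size_t i = 0; i < count; ++i)
    {
        EquivalentInteger bits;
        std::memcpy(&bits, &values[i], sizeof(Value));
        EquivalentInteger mask = keep[i] ? ~EquivalentInteger{0} : EquivalentInteger{0};
        bits &= mask; // discarded values become +0.0 and do not affect the sum
        Value masked;
        std::memcpy(&masked, &bits, sizeof(Value));
        sum += masked;
    }
    return sum;
}

int main()
{
    const double values[] = {1.5, 2.5, 4.0};
    const uint8_t keep[] = {1, 0, 1};
    assert(maskedSum(values, keep, 3) == 5.5);
}
```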

View File

@ -489,15 +489,15 @@ public:
"Aggregate function '{}' requires exactly one parameter "
"of Array type", getName());
Array keys_to_keep_;
if (!params_.front().tryGet<Array>(keys_to_keep_))
Array keys_to_keep_values;
if (!params_.front().tryGet<Array>(keys_to_keep_values))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Aggregate function {} requires an Array as a parameter",
getName());
keys_to_keep.reserve(keys_to_keep_.size());
keys_to_keep.reserve(keys_to_keep_values.size());
for (const Field & f : keys_to_keep_)
for (const Field & f : keys_to_keep_values)
keys_to_keep.emplace(f.safeGet<T>());
}

View File

@ -64,7 +64,7 @@ namespace ErrorCodes
* Both WelchTTest and StudentTTest have a t-statistic with Student distribution but with different degrees of freedom.
* So the procedure of computing p-value is the same.
*/
static inline Float64 getPValue(Float64 degrees_of_freedom, Float64 t_stat2)
static inline Float64 getPValue(Float64 degrees_of_freedom, Float64 t_stat2) /// NOLINT
{
Float64 numerator = integrateSimpson(0, degrees_of_freedom / (t_stat2 + degrees_of_freedom),
[degrees_of_freedom](double x) { return std::pow(x, degrees_of_freedom / 2 - 1) / std::sqrt(1 - x); });
@ -92,7 +92,7 @@ public:
AggregateFunctionTTest(const DataTypes & arguments, const Array & params)
: IAggregateFunctionDataHelper<Data, AggregateFunctionTTest<Data>>({arguments}, params)
{
if (params.size() > 0)
if (!params.empty())
{
need_confidence_interval = true;
confidence_level = params.at(0).safeGet<Float64>();
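
getPValue above evaluates the Student-distribution tail integral numerically through integrateSimpson. A self-contained sketch of composite Simpson integration; the helper's exact signature in ClickHouse is assumed here, so this only illustrates the numerical technique:

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>

// Composite Simpson's rule over [a, b] with an even number of subintervals.
// getPValue above uses a helper like this to integrate
// x^(dof/2 - 1) / sqrt(1 - x) over [0, dof / (t^2 + dof)].
template <typename F>
double integrateSimpsonSketch(double a, double b, F f, size_t n = 1000)
{
    if (n % 2 != 0)
        ++n;
    const double h = (b - a) / n;
    double sum = f(a) + f(b);
    for (size_t i = 1; i < n; ++i)
        sum += (i % 2 ? 4.0 : 2.0) * f(a + i * h);
    return sum * h / 3.0;
}

int main()
{
    // Sanity check: the integral of x^2 over [0, 1] is 1/3 (Simpson is exact here).
    double result = integrateSimpsonSketch(0.0, 1.0, [](double x) { return x * x; });
    assert(std::fabs(result - 1.0 / 3.0) < 1e-9);
}
```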

View File

@ -20,7 +20,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
static constexpr auto max_events = 32;
static constexpr size_t max_events = 32;
template <typename T>
struct AggregateFunctionWindowFunnelData

View File

@ -117,7 +117,7 @@ template <typename Data>
class AggregateFunctionCrossTab : public IAggregateFunctionDataHelper<Data, AggregateFunctionCrossTab<Data>>
{
public:
AggregateFunctionCrossTab(const DataTypes & arguments)
explicit AggregateFunctionCrossTab(const DataTypes & arguments)
: IAggregateFunctionDataHelper<Data, AggregateFunctionCrossTab<Data>>({arguments}, {})
{
}

View File

@ -55,7 +55,7 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<TYPE>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Data<Int8>>(std::forward<TArgs>(args)...);
@ -94,7 +94,7 @@ static IAggregateFunction * createWithNumericType(const IDataType & argument_typ
{
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
if (which.idx == TypeIndex::Enum8) return new AggregateFunctionTemplate<Int8, Data<Int8>>(std::forward<TArgs>(args)...);
@ -121,7 +121,7 @@ static IAggregateFunction * createWithBasicNumberOrDateOrDateTime(const IDataTyp
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...);
return new AggregateFunctionTemplate<TYPE, Data<TYPE>>(std::forward<TArgs>(args)...); /// NOLINT
FOR_BASIC_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
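
The DISPATCH blocks above stamp out one branch per supported numeric type by expanding a macro over a type list (the X-macro pattern), so adding a type to FOR_NUMERIC_TYPES updates every factory at once. A toy sketch of that pattern with an abbreviated, illustrative type list:

```cpp
#include <cassert>
#include <cstdint>
#include <string>

// X-macro dispatch: the FOR_..._TYPES list expands DISPATCH once per type, so the
// same body is generated for every supported type without hand-written repetition.
// The list and names here are illustrative only.
#define FOR_NUMERIC_TYPES_SKETCH(M) \
    M(uint8_t)                      \
    M(int32_t)                      \
    M(double)

enum class TypeIndexSketch { UInt8, Int32, Float64, Unknown };

template <typename T> constexpr TypeIndexSketch typeIndexOf();
template <> constexpr TypeIndexSketch typeIndexOf<uint8_t>() { return TypeIndexSketch::UInt8; }
template <> constexpr TypeIndexSketch typeIndexOf<int32_t>() { return TypeIndexSketch::Int32; }
template <> constexpr TypeIndexSketch typeIndexOf<double>() { return TypeIndexSketch::Float64; }

std::string describe(TypeIndexSketch which)
{
#define DISPATCH(TYPE) \
    if (which == typeIndexOf<TYPE>()) return "handled as " #TYPE;
    FOR_NUMERIC_TYPES_SKETCH(DISPATCH)
#undef DISPATCH
    return "unsupported type";
}

int main()
{
    assert(describe(TypeIndexSketch::Int32) == "handled as int32_t");
    assert(describe(TypeIndexSketch::Unknown) == "unsupported type");
}
```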

View File

@ -25,7 +25,7 @@ static IAggregateFunction * createAggregateFunctionSingleValue(const String & na
WhichDataType which(argument_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<TYPE>>>(argument_type);
if (which.idx == TypeIndex::TYPE) return new AggregateFunctionTemplate<Data<SingleValueDataFixed<TYPE>>>(argument_type); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
@ -56,7 +56,7 @@ static IAggregateFunction * createAggregateFunctionArgMinMaxSecond(const DataTyp
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type);
return new AggregateFunctionArgMinMax<AggregateFunctionArgMinMaxData<ResData, MinMaxData<SingleValueDataFixed<TYPE>>>>(res_type, val_type); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH
@ -90,7 +90,7 @@ static IAggregateFunction * createAggregateFunctionArgMinMax(const String & name
WhichDataType which(res_type);
#define DISPATCH(TYPE) \
if (which.idx == TypeIndex::TYPE) \
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type);
return createAggregateFunctionArgMinMaxSecond<MinMaxData, SingleValueDataFixed<TYPE>>(res_type, val_type); /// NOLINT
FOR_NUMERIC_TYPES(DISPATCH)
#undef DISPATCH

View File

@ -127,10 +127,10 @@ public:
virtual void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const = 0;
/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0;
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT
/// Deserializes state. This function is called only for empty (just created) states.
virtual void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version = std::nullopt, Arena * arena = nullptr) const = 0;
virtual void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version = std::nullopt, Arena * arena = nullptr) const = 0; /// NOLINT
/// Returns true if a function requires Arena to handle own states (see add(), merge(), deserialize()).
virtual bool allocatesMemoryInArena() const = 0;
@ -174,7 +174,7 @@ public:
/** Contains a loop with calls to "add" function. You can collect arguments into array "places"
* and do a single call to "addBatch" for devirtualization and inlining.
*/
virtual void addBatch(
virtual void addBatch( /// NOLINT
size_t batch_size,
AggregateDataPtr * places,
size_t place_offset,
@ -198,7 +198,7 @@ public:
/** The same for single place.
*/
virtual void addBatchSinglePlace(
virtual void addBatchSinglePlace( /// NOLINT
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const = 0;
/// The version of "addBatchSinglePlace", that handle sparse columns as arguments.
@ -208,7 +208,7 @@ public:
/** The same for single place when need to aggregate only filtered data.
/** The same for a single place when we need to aggregate only filtered data.
* Instead of using an if-column, the condition is combined inside the null_map
*/
virtual void addBatchSinglePlaceNotNull(
virtual void addBatchSinglePlaceNotNull( /// NOLINT
size_t batch_size,
AggregateDataPtr place,
const IColumn ** columns,
@ -216,7 +216,7 @@ public:
Arena * arena,
ssize_t if_argument_pos = -1) const = 0;
virtual void addBatchSinglePlaceFromInterval(
virtual void addBatchSinglePlaceFromInterval( /// NOLINT
size_t batch_begin, size_t batch_end, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1)
const = 0;
@ -354,7 +354,7 @@ public:
AddFunc getAddressOfAddFunction() const override { return &addFree; }
void addBatch(
void addBatch( /// NOLINT
size_t batch_size,
AggregateDataPtr * places,
size_t place_offset,
@ -407,7 +407,7 @@ public:
static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
}
void addBatchSinglePlace(
void addBatchSinglePlace( /// NOLINT
size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
{
if (if_argument_pos >= 0)
@ -439,7 +439,7 @@ public:
static_cast<const Derived *>(this)->add(place, &values, offset_it.getValueIndex(), arena);
}
void addBatchSinglePlaceNotNull(
void addBatchSinglePlaceNotNull( /// NOLINT
size_t batch_size,
AggregateDataPtr place,
const IColumn ** columns,
@ -462,7 +462,7 @@ public:
}
}
void addBatchSinglePlaceFromInterval(
void addBatchSinglePlaceFromInterval( /// NOLINT
size_t batch_begin, size_t batch_end, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1)
const override
{
@ -586,7 +586,7 @@ public:
IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_)
: IAggregateFunctionHelper<Derived>(argument_types_, parameters_) {}
void create(AggregateDataPtr place) const override
void create(AggregateDataPtr place) const override /// NOLINT
{
new (place) Data;
}
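
The addBatch family above exists so that one virtual call covers a whole batch while the per-row add is resolved statically through the CRTP helper (the static_cast<const Derived *> pattern visible above). A minimal hedged sketch of that devirtualization idea outside ClickHouse's class hierarchy:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// CRTP sketch of the addBatch devirtualization used by IAggregateFunctionHelper
// above: the batching loop lives in the helper, and add() is resolved at compile
// time via static_cast to the derived type, so there is no virtual call per row.
template <typename Derived>
struct AggregateHelperSketch
{
    void addBatch(const double * values, size_t count, double & state) const
    {
        for (size_t i = 0; i < count; ++i)
            static_cast<const Derived *>(this)->add(state, values[i]);
    }
};

struct SumSketch : AggregateHelperSketch<SumSketch>
{
    void add(double & state, double value) const { state += value; }
};

int main()
{
    std::vector<double> values = {1.0, 2.0, 3.0};
    double state = 0.0;
    SumSketch{}.addBatch(values.data(), values.size(), state);
    assert(state == 6.0);
}
```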

View File

@ -68,7 +68,7 @@ public:
const DataTypes & arguments,
const Array & params) const = 0;
virtual ~IAggregateFunctionCombinator() {}
virtual ~IAggregateFunctionCombinator() = default;
};
using AggregateFunctionCombinatorPtr = std::shared_ptr<const IAggregateFunctionCombinator>;

View File

@ -65,7 +65,7 @@ template <typename T, ReservoirSamplerOnEmpty::Enum OnEmpty = ReservoirSamplerOn
class ReservoirSampler
{
public:
ReservoirSampler(size_t sample_count_ = DEFAULT_SAMPLE_COUNT)
explicit ReservoirSampler(size_t sample_count_ = DEFAULT_SAMPLE_COUNT)
: sample_count(sample_count_)
{
rng.seed(123456);
@ -111,7 +111,7 @@ public:
sortIfNeeded();
double index = level * (samples.size() - 1);
size_t int_index = static_cast<size_t>(index + 0.5);
size_t int_index = static_cast<size_t>(index + 0.5); /// NOLINT
int_index = std::max(0LU, std::min(samples.size() - 1, int_index));
return samples[int_index];
}
@ -190,7 +190,7 @@ public:
}
else
{
for (double i = 0; i < sample_count; i += frequency)
for (double i = 0; i < sample_count; i += frequency) /// NOLINT
samples[i] = b.samples[i];
}
}
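
The quantile lookup above sorts the collected reservoir samples and returns the one whose index is nearest to level * (size - 1). A tiny standalone sketch of that selection with a worked value; the name below is illustrative:

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of the quantile lookup above: sort the reservoir samples and take the
// element whose index is nearest to level * (size - 1).
double quantileNearestSketch(std::vector<double> samples, double level)
{
    std::sort(samples.begin(), samples.end());
    double index = level * (samples.size() - 1);
    size_t int_index = static_cast<size_t>(index + 0.5);
    int_index = std::min(samples.size() - 1, int_index);
    return samples[int_index];
}

int main()
{
    std::vector<double> samples = {5, 1, 4, 2, 3};
    assert(quantileNearestSketch(samples, 0.5) == 3); // index 0.5 * 4 = 2 in {1,2,3,4,5}
    assert(quantileNearestSketch(samples, 1.0) == 5);
}
```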

View File

@ -67,7 +67,7 @@ private:
}
public:
ReservoirSamplerDeterministic(const size_t max_sample_size_ = detail::DEFAULT_MAX_SAMPLE_SIZE)
explicit ReservoirSamplerDeterministic(const size_t max_sample_size_ = detail::DEFAULT_MAX_SAMPLE_SIZE)
: max_sample_size{max_sample_size_}
{
}
@ -103,7 +103,7 @@ public:
sortIfNeeded();
double index = level * (samples.size() - 1);
size_t int_index = static_cast<size_t>(index + 0.5);
size_t int_index = static_cast<size_t>(index + 0.5); /// NOLINT
int_index = std::max(0LU, std::min(samples.size() - 1, int_index));
return samples[int_index].first;
}

View File

@ -6,6 +6,7 @@
#include <boost/noncopyable.hpp>
#include <memory>
#include <base/StringRef.h>
#include <theta_sketch.hpp>
#include <theta_union.hpp>

View File

@ -105,8 +105,8 @@ private:
}
}
inline size_t buf_size() const { return 1ULL << size_degree; }
inline size_t max_fill() const { return 1ULL << (size_degree - 1); }
inline size_t buf_size() const { return 1ULL << size_degree; } /// NOLINT
inline size_t max_fill() const { return 1ULL << (size_degree - 1); } /// NOLINT
inline size_t mask() const { return buf_size() - 1; }
inline size_t place(HashValue x) const { return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask(); }
@ -304,8 +304,11 @@ public:
memcpy(buf, rhs.buf, buf_size() * sizeof(buf[0]));
}
UniquesHashSet & operator= (const UniquesHashSet & rhs)
UniquesHashSet & operator=(const UniquesHashSet & rhs)
{
if (&rhs == this)
return *this;
if (size_degree != rhs.size_degree)
{
free();
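
buf_size, max_fill, mask and place above are all derived from size_degree: the table has 2^size_degree cells, grows once it is half full, and a hash is placed using its low bits after a fixed skip. A short sketch with one concrete value; the shift amount below stands in for UNIQUES_HASH_BITS_FOR_SKIP, whose real value is not shown here:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch of the power-of-two bookkeeping above: 2^size_degree cells, growth at
// half fill, and placement by masking the hash after skipping some high bits
// (UNIQUES_HASH_BITS_FOR_SKIP in the real code; 8 below is just an example).
struct HashSetSizingSketch
{
    size_t size_degree;
    unsigned bits_for_skip;

    size_t bufSize() const { return 1ULL << size_degree; }
    size_t maxFill() const { return 1ULL << (size_degree - 1); }
    size_t mask() const { return bufSize() - 1; }
    size_t place(uint32_t hash) const { return (hash >> bits_for_skip) & mask(); }
};

int main()
{
    HashSetSizingSketch s{4, 8};
    assert(s.bufSize() == 16); // 2^4 cells
    assert(s.maxFill() == 8);  // resize once half of the cells are used
    assert(s.mask() == 0xF);
    assert(s.place(0x12345678) == 0x6); // (0x12345678 >> 8) & 0xF
}
```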

View File

@ -12,7 +12,7 @@ class BackupEntryFromAppendOnlyFile : public BackupEntryFromImmutableFile
{
public:
/// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
BackupEntryFromAppendOnlyFile(
explicit BackupEntryFromAppendOnlyFile(
const String & file_path_,
const std::optional<UInt64> & file_size_ = {},
const std::optional<UInt128> & checksum_ = {},

View File

@ -16,7 +16,7 @@ class BackupEntryFromImmutableFile : public IBackupEntry
{
public:
/// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
BackupEntryFromImmutableFile(
explicit BackupEntryFromImmutableFile(
const String & file_path_,
const std::optional<UInt64> & file_size_ = {},
const std::optional<UInt128> & checksum_ = {},

View File

@ -13,7 +13,7 @@ class BackupEntryFromMemory : public IBackupEntry
public:
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
BackupEntryFromMemory(const void * data_, size_t size_, const std::optional<UInt128> & checksum_ = {});
BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_ = {});
explicit BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_ = {});
UInt64 getSize() const override { return data.size(); }
std::optional<UInt128> getChecksum() const override { return checksum; }

View File

@ -14,7 +14,7 @@ class BackupEntryFromSmallFile : public BackupEntryFromMemory
{
public:
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
BackupEntryFromSmallFile(
explicit BackupEntryFromSmallFile(
const String & file_path_,
const std::optional<UInt128> & checksum_ = {});

View File

@ -16,7 +16,7 @@ using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
class IBackup : public std::enable_shared_from_this<IBackup>, public TypePromotion<IBackup>
{
public:
IBackup() {}
IBackup() = default;
virtual ~IBackup() = default;
/// Name of the backup.
@ -44,7 +44,7 @@ public:
/// before the terminator. For example, list("", "") returns names of all the entries
/// in the backup; and list("data/", "/") returns a list of the folders and
/// files stored in the "data/" directory inside the backup.
virtual Strings listFiles(const String & prefix = "", const String & terminator = "/") const = 0;
virtual Strings listFiles(const String & prefix = "", const String & terminator = "/") const = 0; /// NOLINT
/// Checks if an entry with a specified name exists.
virtual bool fileExists(const String & file_name) const = 0;

View File

@ -36,6 +36,8 @@
#include <Storages/ColumnsDescription.h>
#include <Client/ClientBaseHelpers.h>
#include <Client/TestHint.h>
#include "TestTags.h"
#include <Parsers/parseQuery.h>
#include <Parsers/ParserQuery.h>
@ -1483,6 +1485,219 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
}
bool ClientBase::executeMultiQuery(const String & all_queries_text)
{
// It makes sense not to base any control flow on this, so that it is
// the same in tests and in normal usage. The only difference is that in
// normal mode we ignore the test hints.
const bool test_mode = config().has("testmode");
if (test_mode)
{
/// disable logs if expects errors
TestHint test_hint(test_mode, all_queries_text);
if (test_hint.clientError() || test_hint.serverError())
processTextAsSingleQuery("SET send_logs_level = 'fatal'");
}
bool echo_query = echo_queries;
/// Test tags are started with "--" so they are interpreted as comments anyway.
/// But if the echo is enabled we have to remove the test tags from `all_queries_text`
/// because we don't want test tags to be echoed.
size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
/// Several queries separated by ';'.
/// INSERT data is ended by the end of line, not ';'.
/// An exception is VALUES format where we also support semicolon in
/// addition to end of line.
const char * this_query_begin = all_queries_text.data() + test_tags_length;
const char * this_query_end;
const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
String query_to_execute;
ASTPtr parsed_query;
std::optional<Exception> current_exception;
while (true)
{
auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
query_to_execute, parsed_query, all_queries_text, current_exception);
switch (stage)
{
case MultiQueryProcessingStage::QUERIES_END:
case MultiQueryProcessingStage::PARSING_FAILED:
{
return true;
}
case MultiQueryProcessingStage::CONTINUE_PARSING:
{
continue;
}
case MultiQueryProcessingStage::PARSING_EXCEPTION:
{
this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end);
// Try to find test hint for syntax error. We don't know where
// the query ends because we failed to parse it, so we consume
// the entire line.
TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin));
if (hint.serverError())
{
// Syntax errors are considered as client errors
current_exception->addMessage("\nExpected server error '{}'.", hint.serverError());
current_exception->rethrow();
}
if (hint.clientError() != current_exception->code())
{
if (hint.clientError())
current_exception->addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
current_exception->rethrow();
}
/// It's expected syntax error, skip the line
this_query_begin = this_query_end;
current_exception.reset();
continue;
}
case MultiQueryProcessingStage::EXECUTE_QUERY:
{
full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
if (query_fuzzer_runs)
{
if (!processWithFuzzing(full_query))
return false;
this_query_begin = this_query_end;
continue;
}
// Now we know for sure where the query ends.
// Look for the hint in the text of query + insert data + trailing
// comments, e.g. insert into t format CSV 'a' -- { serverError 123 }.
// Use the updated query boundaries we just calculated.
TestHint test_hint(test_mode, full_query);
// Echo all queries if asked; makes for a more readable reference file.
echo_query = test_hint.echoQueries().value_or(echo_query);
try
{
processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
}
catch (...)
{
// Surprisingly, this is a client error. A server error would
// have been reported w/o throwing (see onReceiveSeverException()).
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}
// Check whether the error (or its absence) matches the test hints
// (or their absence).
bool error_matches_hint = true;
if (have_error)
{
if (test_hint.serverError())
{
if (!server_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
test_hint.serverError(), full_query);
}
else if (server_exception->code() != test_hint.serverError())
{
error_matches_hint = false;
fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
test_hint.serverError(), server_exception->code(), full_query);
}
}
if (test_hint.clientError())
{
if (!client_exception)
{
error_matches_hint = false;
fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
test_hint.clientError(), full_query);
}
else if (client_exception->code() != test_hint.clientError())
{
error_matches_hint = false;
fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
test_hint.clientError(), client_exception->code(), full_query);
}
}
if (!test_hint.clientError() && !test_hint.serverError())
{
// No error was expected but it still occurred. This is the
// default case w/o test hint, doesn't need additional
// diagnostics.
error_matches_hint = false;
}
}
else
{
if (test_hint.clientError())
{
error_matches_hint = false;
fmt::print(stderr,
"The query succeeded but the client error '{}' was expected (query: {}).\n",
test_hint.clientError(), full_query);
}
if (test_hint.serverError())
{
error_matches_hint = false;
fmt::print(stderr,
"The query succeeded but the server error '{}' was expected (query: {}).\n",
test_hint.serverError(), full_query);
}
}
// If the error is expected, force reconnect and ignore it.
if (have_error && error_matches_hint)
{
client_exception.reset();
server_exception.reset();
have_error = false;
if (!connection->checkConnected())
connect();
}
// For INSERTs with inline data: use the end of inline data as
// reported by the format parser (it is saved in sendData()).
// This allows us to handle queries like:
// insert into t values (1); select 1
// , where the inline data is delimited by semicolon and not by a
// newline.
auto * insert_ast = parsed_query->as<ASTInsertQuery>();
if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
{
this_query_end = insert_ast->end;
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
}
// Report error.
if (have_error)
processError(full_query);
// Stop processing queries if needed.
if (have_error && !ignore_error)
return is_interactive;
this_query_begin = this_query_end;
break;
}
}
}
}
bool ClientBase::processQueryText(const String & text)
{
if (exit_strings.end() != exit_strings.find(trim(text, [](char c) { return isWhitespaceASCII(c) || c == ';'; })))
@ -1967,6 +2182,8 @@ void ClientBase::init(int argc, char ** argv)
("suggestion_limit", po::value<int>()->default_value(10000),
"Suggestion limit for how many databases, tables and columns to fetch.")
("testmode,T", "enable test hints in comments")
("format,f", po::value<std::string>(), "default output format")
("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command")
("highlight", po::value<bool>()->default_value(true), "enable or disable basic syntax highlight in interactive command line")
@ -2072,6 +2289,8 @@ void ClientBase::init(int argc, char ** argv)
config().setBool("interactive", true);
if (options.count("pager"))
config().setString("pager", options["pager"].as<std::string>());
if (options.count("testmode"))
config().setBool("testmode", true);
if (options.count("log-level"))
Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
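
The multi-query loop above compares each query's outcome against hints embedded in trailing comments such as "-- { serverError 123 }" (the form shown in the comments above), enabled by --testmode. A hedged sketch of extracting such a hint; this regex-based helper only illustrates the convention and is not ClickHouse's TestHint parser:

```cpp
#include <cassert>
#include <optional>
#include <regex>
#include <string>

// A query in test mode may carry a trailing comment like `-- { serverError 123 }`
// or `-- { clientError 42 }`; the client then checks that the actual error code
// matches the hinted one. This extractor is only an illustration of that format.
std::optional<int> expectedErrorCode(const std::string & query, const std::string & kind)
{
    std::regex hint_re("--\\s*\\{\\s*" + kind + "\\s+(\\d+)\\s*\\}");
    std::smatch match;
    if (std::regex_search(query, match, hint_re))
        return std::stoi(match[1].str());
    return std::nullopt;
}

int main()
{
    std::string query = "insert into t format CSV 'a' -- { serverError 123 }";
    assert(expectedErrorCode(query, "serverError") == 123);
    assert(!expectedErrorCode(query, "clientError").has_value());
}
```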

View File

@ -61,7 +61,6 @@ protected:
throw Exception("Query processing with fuzzing is not implemented", ErrorCodes::NOT_IMPLEMENTED);
}
virtual bool executeMultiQuery(const String & all_queries_text) = 0;
virtual void connect() = 0;
virtual void processError(const String & query) const = 0;
virtual String getName() const = 0;
@ -77,6 +76,7 @@ protected:
ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const;
static void setupSignalHandler();
bool executeMultiQuery(const String & all_queries_text);
MultiQueryProcessingStage analyzeMultiQueryText(
const char *& this_query_begin, const char *& this_query_end, const char * all_queries_end,
String & query_to_execute, ASTPtr & parsed_query, const String & all_queries_text,
@ -103,7 +103,6 @@ protected:
const std::vector<Arguments> & hosts_and_ports_arguments) = 0;
virtual void processConfig() = 0;
protected:
bool processQueryText(const String & text);
private:
@ -256,7 +255,7 @@ protected:
struct HostAndPort
{
String host;
UInt16 port;
std::optional<UInt16> port;
};
std::vector<HostAndPort> hosts_and_ports{};

View File

@ -25,7 +25,9 @@ namespace ErrorCodes
ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfiguration & config,
std::string connection_host,
int connection_port) : host(connection_host), port(connection_port)
std::optional<UInt16> connection_port)
: host(connection_host)
, port(connection_port.value_or(getPortFromConfig(config)))
{
bool is_secure = config.getBool("secure", false);
security = is_secure ? Protocol::Secure::Enable : Protocol::Secure::Disable;
@ -73,7 +75,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
{
}
int ConnectionParameters::getPortFromConfig(const Poco::Util::AbstractConfiguration & config)
UInt16 ConnectionParameters::getPortFromConfig(const Poco::Util::AbstractConfiguration & config)
{
bool is_secure = config.getBool("secure", false);
return config.getInt("port",

View File

@ -22,11 +22,11 @@ struct ConnectionParameters
Protocol::Compression compression = Protocol::Compression::Enable;
ConnectionTimeouts timeouts;
ConnectionParameters() {}
ConnectionParameters() = default;
ConnectionParameters(const Poco::Util::AbstractConfiguration & config);
ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, int port);
ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, std::optional<UInt16> port);
static int getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
static UInt16 getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
};
}

View File

@ -25,12 +25,11 @@ class IConnectionPool : private boost::noncopyable
public:
using Entry = PoolBase<Connection>::Entry;
public:
virtual ~IConnectionPool() = default;
/// Selects the connection to work.
/// If force_connected is false, the client must manually ensure that returned connection is good.
virtual Entry get(const ConnectionTimeouts & timeouts,
virtual Entry get(const ConnectionTimeouts & timeouts, /// NOLINT
const Settings * settings = nullptr,
bool force_connected = true) = 0;
@ -76,7 +75,7 @@ public:
{
}
Entry get(const ConnectionTimeouts & timeouts,
Entry get(const ConnectionTimeouts & timeouts, /// NOLINT
const Settings * settings = nullptr,
bool force_connected = true) override
{

View File

@ -109,7 +109,6 @@ private:
GetPriorityFunc makeGetPriorityFunc(const Settings * settings);
private:
std::vector<size_t> hostname_differences; /// Distances from name of this host to the names of hosts of pools.
size_t last_used = 0; /// Last used for round_robin policy.
LoadBalancing default_load_balancing;

View File

@ -22,7 +22,7 @@ struct PocoSocketWrapper : public Poco::Net::SocketImpl
~PocoSocketWrapper() override { reset(-1); }
};
void IConnections::DrainCallback::operator()(int fd, Poco::Timespan, const std::string fd_description) const
void IConnections::DrainCallback::operator()(int fd, Poco::Timespan, const std::string & fd_description) const
{
if (!PocoSocketWrapper(fd).poll(drain_timeout, Poco::Net::Socket::SELECT_READ))
{

View File

@ -16,7 +16,7 @@ public:
struct DrainCallback
{
Poco::Timespan drain_timeout;
void operator()(int fd, Poco::Timespan, const std::string fd_description = "") const;
void operator()(int fd, Poco::Timespan, const std::string & fd_description = "") const;
};
/// Send all scalars to replicas.

View File

@ -14,7 +14,6 @@
#include <Storages/MergeTree/RequestResponse.h>
#include <boost/noncopyable.hpp>

View File

@ -546,7 +546,7 @@ void QueryFuzzer::fuzz(ASTPtr & ast)
* small probability. Do this after we add this fuzzer to CI and fix all the
* problems it can routinely find even in this boring version.
*/
void QueryFuzzer::collectFuzzInfoMain(const ASTPtr ast)
void QueryFuzzer::collectFuzzInfoMain(ASTPtr ast)
{
collectFuzzInfoRecurse(ast);
@ -569,7 +569,7 @@ void QueryFuzzer::collectFuzzInfoMain(const ASTPtr ast)
}
}
void QueryFuzzer::addTableLike(const ASTPtr ast)
void QueryFuzzer::addTableLike(ASTPtr ast)
{
if (table_like_map.size() > 1000)
{
@ -583,7 +583,7 @@ void QueryFuzzer::addTableLike(const ASTPtr ast)
}
}
void QueryFuzzer::addColumnLike(const ASTPtr ast)
void QueryFuzzer::addColumnLike(ASTPtr ast)
{
if (column_like_map.size() > 1000)
{
@ -606,7 +606,7 @@ void QueryFuzzer::addColumnLike(const ASTPtr ast)
}
}
void QueryFuzzer::collectFuzzInfoRecurse(const ASTPtr ast)
void QueryFuzzer::collectFuzzInfoRecurse(ASTPtr ast)
{
if (auto * impl = dynamic_cast<ASTWithAlias *>(ast.get()))
{

View File

@ -71,10 +71,10 @@ struct QueryFuzzer
void fuzzWindowFrame(ASTWindowDefinition & def);
void fuzz(ASTs & asts);
void fuzz(ASTPtr & ast);
void collectFuzzInfoMain(const ASTPtr ast);
void addTableLike(const ASTPtr ast);
void addColumnLike(const ASTPtr ast);
void collectFuzzInfoRecurse(const ASTPtr ast);
void collectFuzzInfoMain(ASTPtr ast);
void addTableLike(ASTPtr ast);
void addColumnLike(ASTPtr ast);
void collectFuzzInfoRecurse(ASTPtr ast);
};
}

View File

@ -33,7 +33,7 @@ public:
private:
AvailableCollationLocales();
private:
AvailableLocalesMap locales_map;
};

View File

@ -92,7 +92,7 @@ private:
/// Create a new column that has another column as a source.
MutablePtr createView() const;
ColumnAggregateFunction(const AggregateFunctionPtr & func_, std::optional<size_t> version_ = std::nullopt);
explicit ColumnAggregateFunction(const AggregateFunctionPtr & func_, std::optional<size_t> version_ = std::nullopt);
ColumnAggregateFunction(const AggregateFunctionPtr & func_, const ConstArenas & arenas_);

Some files were not shown because too many files have changed in this diff.