Merge branch 'master' of github.com:yandex/ClickHouse into system_metric_log
commit ed6fbc2c5c
@@ -185,7 +185,7 @@
 * Fixed "select_format" performance test for `Pretty` formats [#5642](https://github.com/yandex/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))


-## ClickHouse release 19.9.4.1, 2019-07-05
+## ClickHouse release 19.9.3.31, 2019-07-05

 ### Bug Fix
 * Fix segfault in Delta codec which affects columns with values less than 32 bits size. The bug led to random memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))
@@ -197,7 +197,7 @@
 * Fix race condition, which cause that some queries may not appear in query_log instantly after SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
 * Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

-## ClickHouse release 19.7.6.1, 2019-07-05
+## ClickHouse release 19.7.5.29, 2019-07-05

 ### Bug Fix
 * Fix performance regression in some queries with JOIN. [#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
@@ -437,10 +437,10 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE

 include(GNUInstallDirs)
 include (cmake/find_contrib_lib.cmake)
-include (cmake/lib_name.cmake)

 find_contrib_lib(double-conversion) # Must be before parquet
 include (cmake/find_ssl.cmake)
+include (cmake/lib_name.cmake)
 include (cmake/find_icu.cmake)
 include (cmake/find_boost.cmake)
 include (cmake/find_zlib.cmake)
@@ -1,4 +1,6 @@
-option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED})
+if (NOT ARCH_ARM AND NOT OS_FREEBSD)
+    option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${NOT_UNBUNDLED})
+endif ()

 if (ENABLE_FASTOPS)
     if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h")
@@ -12,4 +14,4 @@ else ()
     set(USE_FASTOPS 0)
 endif ()

-message (STATUS "Using fastops")
+message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}")
@@ -48,7 +48,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
     set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow -Wshadow-uncaptured-local -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override -Wunused-exception-parameter -Wcovered-switch-default -Wold-style-cast -Wrange-loop-analysis -Wunused-member-function -Wunreachable-code -Wunreachable-code-return -Wnewline-eof -Wembedded-directive -Wgnu-case-range -Wunused-macros -Wconditional-uninitialized -Wdeprecated -Wundef -Wreserved-id-macro -Wredundant-parens -Wzero-as-null-pointer-constant")

     if (WEVERYTHING)
-        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed")
+        set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed")

         # TODO Enable conversion, sign-conversion, double-promotion warnings.
     endif ()
@@ -71,7 +71,9 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
             set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-ctad-maybe-unsupported")
         endif ()
     endif ()
-endif ()
+elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow")
+endif()

 if (USE_DEBUG_HELPERS)
     set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h")
@@ -711,7 +711,7 @@ private:
 if (ignore_error)
 {
     Tokens tokens(begin, end);
-    TokenIterator token_iterator(tokens);
+    IParser::Pos token_iterator(tokens);
     while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid())
         ++token_iterator;
     begin = token_iterator->end;
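The hunk above only swaps the cursor type, but the surrounding logic is the interesting part: when ignore_error is set, the client recovers from a statement that failed to parse by advancing the token cursor to the next semicolon and continuing with the rest of the input. A toy, string-based sketch of that recovery idea follows (skipFailedStatement is a hypothetical helper, not the client's API; the real code iterates lexer tokens, which also keeps a ';' inside a string literal from being treated as a terminator):

#include <string_view>

// Toy recovery step: drop everything up to and including the next ';'
// so the following statement can still be attempted.
std::string_view skipFailedStatement(std::string_view queries)
{
    const auto pos = queries.find(';');
    if (pos == std::string_view::npos)
        return {};                    // nothing left to execute
    return queries.substr(pos + 1);   // resume right after the terminator
}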
@@ -123,7 +123,7 @@ enum class TaskState
 struct TaskStateWithOwner
 {
     TaskStateWithOwner() = default;
-    TaskStateWithOwner(TaskState state, const String & owner) : state(state), owner(owner) {}
+    TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {}

     TaskState state{TaskState::Unknown};
     String owner;
@@ -2100,9 +2100,9 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)

 // process_id is '<hostname>#<start_timestamp>_<pid>'
 time_t timestamp = Poco::Timestamp().epochTime();
-auto pid = Poco::Process::id();
+auto curr_pid = Poco::Process::id();

-process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(pid);
+process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid);
 host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id;
 process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString();
 Poco::File(process_path).createDirectories();
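As a side note on the comment in the hunk above, the identifier shape is '<hostname>#<start_timestamp>_<pid>': process_id is the timestamp-plus-pid part, and host_id prefixes it with the host name. A standalone sketch of building the same kind of identifier with plain POSIX calls instead of Poco (the helper name and buffer sizes here are illustrative, not part of the original code):

#include <ctime>
#include <string>
#include <unistd.h>

// Hypothetical helper producing "<hostname>#<YYYYMMDDhhmmss>_<pid>".
std::string makeHostProcessId()
{
    char host[256] = {};
    gethostname(host, sizeof(host) - 1);

    std::time_t now = std::time(nullptr);
    char stamp[32] = {};
    std::strftime(stamp, sizeof(stamp), "%Y%m%d%H%M%S", std::localtime(&now));

    std::string process_id = std::string(stamp) + "_" + std::to_string(getpid());
    return std::string(host) + "#" + process_id;
}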
@ -176,7 +176,7 @@ private:
|
||||
const UInt64 seed;
|
||||
|
||||
public:
|
||||
UnsignedIntegerModel(UInt64 seed) : seed(seed) {}
|
||||
UnsignedIntegerModel(UInt64 seed_) : seed(seed_) {}
|
||||
|
||||
void train(const IColumn &) override {}
|
||||
void finalize() override {}
|
||||
@ -212,7 +212,7 @@ private:
|
||||
const UInt64 seed;
|
||||
|
||||
public:
|
||||
SignedIntegerModel(UInt64 seed) : seed(seed) {}
|
||||
SignedIntegerModel(UInt64 seed_) : seed(seed_) {}
|
||||
|
||||
void train(const IColumn &) override {}
|
||||
void finalize() override {}
|
||||
@ -256,7 +256,7 @@ private:
|
||||
Float res_prev_value = 0;
|
||||
|
||||
public:
|
||||
FloatModel(UInt64 seed) : seed(seed) {}
|
||||
FloatModel(UInt64 seed_) : seed(seed_) {}
|
||||
|
||||
void train(const IColumn &) override {}
|
||||
void finalize() override {}
|
||||
@ -348,7 +348,7 @@ private:
|
||||
const UInt64 seed;
|
||||
|
||||
public:
|
||||
FixedStringModel(UInt64 seed) : seed(seed) {}
|
||||
FixedStringModel(UInt64 seed_) : seed(seed_) {}
|
||||
|
||||
void train(const IColumn &) override {}
|
||||
void finalize() override {}
|
||||
@ -385,7 +385,7 @@ private:
|
||||
const DateLUTImpl & date_lut;
|
||||
|
||||
public:
|
||||
DateTimeModel(UInt64 seed) : seed(seed), date_lut(DateLUT::instance()) {}
|
||||
DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {}
|
||||
|
||||
void train(const IColumn &) override {}
|
||||
void finalize() override {}
|
||||
@ -533,8 +533,8 @@ private:
|
||||
}
|
||||
|
||||
public:
|
||||
MarkovModel(MarkovModelParameters params)
|
||||
: params(std::move(params)), code_points(params.order, BEGIN) {}
|
||||
MarkovModel(MarkovModelParameters params_)
|
||||
: params(std::move(params_)), code_points(params.order, BEGIN) {}
|
||||
|
||||
void consume(const char * data, size_t size)
|
||||
{
|
||||
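The MarkovModel hunk above is worth a second look: in the old initializer list, `code_points(params.order, BEGIN)` named the constructor parameter `params`, which had just been moved from, rather than the freshly initialized member. With a trivially copyable field the value happens to survive the move, but the shadowing is fragile and is exactly what this batch of trailing-underscore renames (and the -Wshadow flags added in the cmake hunk earlier) is meant to eliminate. A small self-contained illustration with hypothetical types:

#include <cstddef>
#include <utility>
#include <vector>

struct Params { std::vector<int> transitions; };

struct ModelBad
{
    Params params;
    std::vector<std::size_t> code_points;

    // In the second initializer, `params` names the constructor parameter (it shadows
    // the member), and that parameter was just moved from, so the size read here is
    // typically 0 rather than what the caller passed in.
    ModelBad(Params params)
        : params(std::move(params)), code_points(params.transitions.size(), 0) {}
};

struct ModelGood
{
    Params params;
    std::vector<std::size_t> code_points;

    // With the trailing-underscore parameter there is no shadowing:
    // `params.transitions` unambiguously reads the already-initialized member.
    ModelGood(Params params_)
        : params(std::move(params_)), code_points(params.transitions.size(), 0) {}
};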
@ -745,7 +745,7 @@ private:
|
||||
MarkovModel markov_model;
|
||||
|
||||
public:
|
||||
StringModel(UInt64 seed, MarkovModelParameters params) : seed(seed), markov_model(std::move(params)) {}
|
||||
StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(std::move(params_)) {}
|
||||
|
||||
void train(const IColumn & column) override
|
||||
{
|
||||
@ -797,7 +797,7 @@ private:
|
||||
ModelPtr nested_model;
|
||||
|
||||
public:
|
||||
ArrayModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {}
|
||||
ArrayModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {}
|
||||
|
||||
void train(const IColumn & column) override
|
||||
{
|
||||
@ -830,7 +830,7 @@ private:
|
||||
ModelPtr nested_model;
|
||||
|
||||
public:
|
||||
NullableModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {}
|
||||
NullableModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {}
|
||||
|
||||
void train(const IColumn & column) override
|
||||
{
|
||||
|
@ -18,12 +18,12 @@ namespace ErrorCodes
|
||||
|
||||
|
||||
ODBCBlockInputStream::ODBCBlockInputStream(
|
||||
Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size)
|
||||
: session{session}
|
||||
Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
|
||||
: session{session_}
|
||||
, statement{(this->session << query_str, Poco::Data::Keywords::now)}
|
||||
, result{statement}
|
||||
, iterator{result.begin()}
|
||||
, max_block_size{max_block_size}
|
||||
, max_block_size{max_block_size_}
|
||||
, log(&Logger::get("ODBCBlockInputStream"))
|
||||
{
|
||||
if (sample_block.columns() != result.columnCount())
|
||||
@ -43,46 +43,46 @@ namespace
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case ValueType::UInt8:
|
||||
case ValueType::vtUInt8:
|
||||
static_cast<ColumnUInt8 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt16:
|
||||
case ValueType::vtUInt16:
|
||||
static_cast<ColumnUInt16 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt32:
|
||||
case ValueType::vtUInt32:
|
||||
static_cast<ColumnUInt32 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt64:
|
||||
case ValueType::vtUInt64:
|
||||
static_cast<ColumnUInt64 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::Int8:
|
||||
case ValueType::vtInt8:
|
||||
static_cast<ColumnInt8 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int16:
|
||||
case ValueType::vtInt16:
|
||||
static_cast<ColumnInt16 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int32:
|
||||
case ValueType::vtInt32:
|
||||
static_cast<ColumnInt32 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int64:
|
||||
case ValueType::vtInt64:
|
||||
static_cast<ColumnInt64 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Float32:
|
||||
case ValueType::vtFloat32:
|
||||
static_cast<ColumnFloat32 &>(column).insertValue(value.convert<Float64>());
|
||||
break;
|
||||
case ValueType::Float64:
|
||||
case ValueType::vtFloat64:
|
||||
static_cast<ColumnFloat64 &>(column).insertValue(value.convert<Float64>());
|
||||
break;
|
||||
case ValueType::String:
|
||||
case ValueType::vtString:
|
||||
static_cast<ColumnString &>(column).insert(value.convert<String>());
|
||||
break;
|
||||
case ValueType::Date:
|
||||
case ValueType::vtDate:
|
||||
static_cast<ColumnUInt16 &>(column).insertValue(UInt16{LocalDate{value.convert<String>()}.getDayNum()});
|
||||
break;
|
||||
case ValueType::DateTime:
|
||||
case ValueType::vtDateTime:
|
||||
static_cast<ColumnUInt32 &>(column).insertValue(time_t{LocalDateTime{value.convert<String>()}});
|
||||
break;
|
||||
case ValueType::UUID:
|
||||
case ValueType::vtUUID:
|
||||
static_cast<ColumnUInt128 &>(column).insert(parse<UUID>(value.convert<std::string>()));
|
||||
break;
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ class ODBCBlockInputStream final : public IBlockInputStream
|
||||
{
|
||||
public:
|
||||
ODBCBlockInputStream(
|
||||
Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size);
|
||||
Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_);
|
||||
|
||||
String getName() const override { return "ODBC"; }
|
||||
|
||||
|
@ -16,8 +16,8 @@ namespace DB
|
||||
{
|
||||
|
||||
MetricsTransmitter::MetricsTransmitter(
|
||||
const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics)
|
||||
: async_metrics(async_metrics), config_name(config_name)
|
||||
const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_)
|
||||
: async_metrics(async_metrics_), config_name(config_name_)
|
||||
{
|
||||
interval_seconds = config.getInt(config_name + ".interval", 60);
|
||||
send_events = config.getBool(config_name + ".events", true);
|
||||
|
@ -32,7 +32,7 @@ class AsynchronousMetrics;
|
||||
class MetricsTransmitter
|
||||
{
|
||||
public:
|
||||
MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics);
|
||||
MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_);
|
||||
~MetricsTransmitter();
|
||||
|
||||
private:
|
||||
|
@ -37,14 +37,14 @@ namespace ErrorCodes
|
||||
extern const int OPENSSL_ERROR;
|
||||
}
|
||||
|
||||
MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id)
|
||||
MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_)
|
||||
: Poco::Net::TCPServerConnection(socket_)
|
||||
, server(server_)
|
||||
, log(&Poco::Logger::get("MySQLHandler"))
|
||||
, connection_context(server.context())
|
||||
, connection_id(connection_id)
|
||||
, public_key(public_key)
|
||||
, private_key(private_key)
|
||||
, connection_id(connection_id_)
|
||||
, public_key(public_key_)
|
||||
, private_key(private_key_)
|
||||
{
|
||||
server_capability_flags = CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | CLIENT_CONNECT_WITH_DB | CLIENT_DEPRECATE_EOF;
|
||||
if (ssl_enabled)
|
||||
|
@ -14,7 +14,7 @@ namespace DB
|
||||
class MySQLHandler : public Poco::Net::TCPServerConnection
|
||||
{
|
||||
public:
|
||||
MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id);
|
||||
MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_);
|
||||
|
||||
void run() final;
|
||||
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <Common/getFQDNOrHostName.h>
|
||||
#include <Common/getMultipleKeysFromConfig.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
#include <Common/getExecutablePath.h>
|
||||
#include <Common/TaskStatsInfoGetter.h>
|
||||
#include <Common/ThreadStatus.h>
|
||||
#include <IO/HTTPCommon.h>
|
||||
@ -156,19 +157,19 @@ std::string Server::getDefaultCorePath() const
|
||||
return getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)) + "cores";
|
||||
}
|
||||
|
||||
void Server::defineOptions(Poco::Util::OptionSet & _options)
|
||||
void Server::defineOptions(Poco::Util::OptionSet & options)
|
||||
{
|
||||
_options.addOption(
|
||||
options.addOption(
|
||||
Poco::Util::Option("help", "h", "show help and exit")
|
||||
.required(false)
|
||||
.repeatable(false)
|
||||
.binding("help"));
|
||||
_options.addOption(
|
||||
options.addOption(
|
||||
Poco::Util::Option("version", "V", "show version and exit")
|
||||
.required(false)
|
||||
.repeatable(false)
|
||||
.binding("version"));
|
||||
BaseDaemon::defineOptions(_options);
|
||||
BaseDaemon::defineOptions(options);
|
||||
}
|
||||
|
||||
int Server::main(const std::vector<std::string> & /*args*/)
|
||||
@ -212,6 +213,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
const auto memory_amount = getMemoryAmount();
|
||||
|
||||
#if defined(__linux__)
|
||||
std::string executable_path = getExecutablePath();
|
||||
if (executable_path.empty())
|
||||
executable_path = "/usr/bin/clickhouse"; /// It is used for information messages.
|
||||
|
||||
/// After full config loaded
|
||||
{
|
||||
if (config().getBool("mlock_executable", false))
|
||||
@ -228,7 +233,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems.");
|
||||
}
|
||||
}
|
||||
@ -547,7 +552,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, 'taskstats' performance statistics will be disabled."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems."
|
||||
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.");
|
||||
}
|
||||
@ -556,7 +561,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_SYS_NICE capability, the setting 'os_thread_nice' will have no effect."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems.");
|
||||
}
|
||||
#else
|
||||
|
@ -35,8 +35,8 @@ private:
|
||||
const DataTypePtr & type_val;
|
||||
|
||||
public:
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res, const DataTypePtr & type_val)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data, AllocatesMemoryInArena>>({type_res, type_val}, {}),
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data, AllocatesMemoryInArena>>({type_res_, type_val_}, {}),
|
||||
type_res(this->argument_types[0]), type_val(this->argument_types[1])
|
||||
{
|
||||
if (!type_val->isComparable())
|
||||
|
@ -62,12 +62,6 @@ public:
|
||||
static_cast<ColumnUInt64 &>(to).getData().push_back(data(place).count);
|
||||
}
|
||||
|
||||
/// May be used for optimization.
|
||||
void addDelta(AggregateDataPtr place, UInt64 x) const
|
||||
{
|
||||
data(place).count += x;
|
||||
}
|
||||
|
||||
const char * getHeaderFilePath() const override { return __FILE__; }
|
||||
};
|
||||
|
||||
|
@ -253,8 +253,8 @@ class GroupArrayGeneralListImpl final
|
||||
UInt64 max_elems;
|
||||
|
||||
public:
|
||||
GroupArrayGeneralListImpl(const DataTypePtr & data_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, limit_num_elems>>({data_type}, {})
|
||||
GroupArrayGeneralListImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, limit_num_elems>>({data_type_}, {})
|
||||
, data_type(this->argument_types[0]), max_elems(max_elems_) {}
|
||||
|
||||
String getName() const override { return "groupArray"; }
|
||||
|
@ -164,8 +164,8 @@ class AggregateFunctionGroupUniqArrayGeneric
|
||||
}
|
||||
|
||||
public:
|
||||
AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type}, {})
|
||||
AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type_}, {})
|
||||
, input_data_type(this->argument_types[0])
|
||||
, max_elems(max_elems_) {}
|
||||
|
||||
|
@ -304,9 +304,9 @@ private:
|
||||
const UInt32 max_bins;
|
||||
|
||||
public:
|
||||
AggregateFunctionHistogram(UInt32 max_bins, const DataTypes & arguments, const Array & params)
|
||||
AggregateFunctionHistogram(UInt32 max_bins_, const DataTypes & arguments, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionHistogramData, AggregateFunctionHistogram<T>>(arguments, params)
|
||||
, max_bins(max_bins)
|
||||
, max_bins(max_bins_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -104,21 +104,21 @@ void registerAggregateFunctionMLMethod(AggregateFunctionFactory & factory)
|
||||
}
|
||||
|
||||
LinearModelData::LinearModelData(
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 param_num,
|
||||
UInt64 batch_capacity,
|
||||
std::shared_ptr<DB::IGradientComputer> gradient_computer,
|
||||
std::shared_ptr<DB::IWeightsUpdater> weights_updater)
|
||||
: learning_rate(learning_rate)
|
||||
, l2_reg_coef(l2_reg_coef)
|
||||
, batch_capacity(batch_capacity)
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 param_num_,
|
||||
UInt64 batch_capacity_,
|
||||
std::shared_ptr<DB::IGradientComputer> gradient_computer_,
|
||||
std::shared_ptr<DB::IWeightsUpdater> weights_updater_)
|
||||
: learning_rate(learning_rate_)
|
||||
, l2_reg_coef(l2_reg_coef_)
|
||||
, batch_capacity(batch_capacity_)
|
||||
, batch_size(0)
|
||||
, gradient_computer(std::move(gradient_computer))
|
||||
, weights_updater(std::move(weights_updater))
|
||||
, gradient_computer(std::move(gradient_computer_))
|
||||
, weights_updater(std::move(weights_updater_))
|
||||
{
|
||||
weights.resize(param_num, Float64{0.0});
|
||||
gradient_batch.resize(param_num + 1, Float64{0.0});
|
||||
weights.resize(param_num_, Float64{0.0});
|
||||
gradient_batch.resize(param_num_ + 1, Float64{0.0});
|
||||
}
|
||||
|
||||
void LinearModelData::update_state()
|
||||
|
@ -248,12 +248,12 @@ public:
|
||||
LinearModelData() {}
|
||||
|
||||
LinearModelData(
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 param_num,
|
||||
UInt64 batch_capacity,
|
||||
std::shared_ptr<IGradientComputer> gradient_computer,
|
||||
std::shared_ptr<IWeightsUpdater> weights_updater);
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 param_num_,
|
||||
UInt64 batch_capacity_,
|
||||
std::shared_ptr<IGradientComputer> gradient_computer_,
|
||||
std::shared_ptr<IWeightsUpdater> weights_updater_);
|
||||
|
||||
void add(const IColumn ** columns, size_t row_num);
|
||||
|
||||
@ -304,21 +304,21 @@ public:
|
||||
String getName() const override { return Name::name; }
|
||||
|
||||
explicit AggregateFunctionMLMethod(
|
||||
UInt32 param_num,
|
||||
std::unique_ptr<IGradientComputer> gradient_computer,
|
||||
std::string weights_updater_name,
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 batch_size,
|
||||
UInt32 param_num_,
|
||||
std::unique_ptr<IGradientComputer> gradient_computer_,
|
||||
std::string weights_updater_name_,
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 batch_size_,
|
||||
const DataTypes & arguments_types,
|
||||
const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionMLMethod<Data, Name>>(arguments_types, params)
|
||||
, param_num(param_num)
|
||||
, learning_rate(learning_rate)
|
||||
, l2_reg_coef(l2_reg_coef)
|
||||
, batch_size(batch_size)
|
||||
, gradient_computer(std::move(gradient_computer))
|
||||
, weights_updater_name(std::move(weights_updater_name))
|
||||
, param_num(param_num_)
|
||||
, learning_rate(learning_rate_)
|
||||
, l2_reg_coef(l2_reg_coef_)
|
||||
, batch_size(batch_size_)
|
||||
, gradient_computer(std::move(gradient_computer_))
|
||||
, weights_updater_name(std::move(weights_updater_name_))
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -679,8 +679,8 @@ private:
|
||||
DataTypePtr & type;
|
||||
|
||||
public:
|
||||
AggregateFunctionsSingleValue(const DataTypePtr & type)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionsSingleValue<Data, AllocatesMemoryInArena>>({type}, {})
|
||||
AggregateFunctionsSingleValue(const DataTypePtr & type_)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionsSingleValue<Data, AllocatesMemoryInArena>>({type_}, {})
|
||||
, type(this->argument_types[0])
|
||||
{
|
||||
if (StringRef(Data::name()) == StringRef("min")
|
||||
|
@ -76,8 +76,8 @@ private:
|
||||
DataTypePtr & argument_type;
|
||||
|
||||
public:
|
||||
AggregateFunctionQuantile(const DataTypePtr & argument_type, const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionQuantile<Value, Data, Name, has_second_arg, FloatReturnType, returns_many>>({argument_type}, params)
|
||||
AggregateFunctionQuantile(const DataTypePtr & argument_type_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionQuantile<Value, Data, Name, has_second_arg, FloatReturnType, returns_many>>({argument_type_}, params)
|
||||
, levels(params, returns_many), level(levels.levels[0]), argument_type(this->argument_types[0])
|
||||
{
|
||||
if (!returns_many && levels.size() > 1)
|
||||
|
@ -33,18 +33,18 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionResample(
|
||||
AggregateFunctionPtr nested_function,
|
||||
Key begin,
|
||||
Key end,
|
||||
size_t step,
|
||||
AggregateFunctionPtr nested_function_,
|
||||
Key begin_,
|
||||
Key end_,
|
||||
size_t step_,
|
||||
const DataTypes & arguments,
|
||||
const Array & params)
|
||||
: IAggregateFunctionHelper<AggregateFunctionResample<Key>>{arguments, params}
|
||||
, nested_function{nested_function}
|
||||
, nested_function{nested_function_}
|
||||
, last_col{arguments.size() - 1}
|
||||
, begin{begin}
|
||||
, end{end}
|
||||
, step{step}
|
||||
, begin{begin_}
|
||||
, end{end_}
|
||||
, step{step_}
|
||||
, total{0}
|
||||
, aod{nested_function->alignOfData()}
|
||||
, sod{(nested_function->sizeOfData() + aod - 1) / aod * aod}
|
||||
|
@ -142,9 +142,9 @@ template <typename T, typename Data, typename Derived>
|
||||
class AggregateFunctionSequenceBase : public IAggregateFunctionDataHelper<Data, Derived>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: IAggregateFunctionDataHelper<Data, Derived>(arguments, params)
|
||||
, pattern(pattern)
|
||||
, pattern(pattern_)
|
||||
{
|
||||
arg_count = arguments.size();
|
||||
parsePattern();
|
||||
@ -199,7 +199,7 @@ private:
|
||||
std::uint64_t extra;
|
||||
|
||||
PatternAction() = default;
|
||||
PatternAction(const PatternActionType type, const std::uint64_t extra = 0) : type{type}, extra{extra} {}
|
||||
PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {}
|
||||
};
|
||||
|
||||
using PatternActions = PODArrayWithStackMemory<PatternAction, 64>;
|
||||
@ -520,8 +520,8 @@ private:
|
||||
|
||||
struct DFAState
|
||||
{
|
||||
DFAState(bool has_kleene = false)
|
||||
: has_kleene{has_kleene}, event{0}, transition{DFATransition::None}
|
||||
DFAState(bool has_kleene_ = false)
|
||||
: has_kleene{has_kleene_}, event{0}, transition{DFATransition::None}
|
||||
{}
|
||||
|
||||
/// .-------.
|
||||
@ -554,8 +554,8 @@ template <typename T, typename Data>
|
||||
class AggregateFunctionSequenceMatch final : public AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>(arguments, params, pattern) {}
|
||||
AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>(arguments, params, pattern_) {}
|
||||
|
||||
using AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>::AggregateFunctionSequenceBase;
|
||||
|
||||
@ -582,8 +582,8 @@ template <typename T, typename Data>
|
||||
class AggregateFunctionSequenceCount final : public AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>(arguments, params, pattern) {}
|
||||
AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>(arguments, params, pattern_) {}
|
||||
|
||||
using AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>::AggregateFunctionSequenceBase;
|
||||
|
||||
|
@ -23,9 +23,9 @@ private:
|
||||
Array params;
|
||||
|
||||
public:
|
||||
AggregateFunctionState(AggregateFunctionPtr nested, const DataTypes & arguments, const Array & params)
|
||||
: IAggregateFunctionHelper<AggregateFunctionState>(arguments, params)
|
||||
, nested_func(nested), arguments(arguments), params(params) {}
|
||||
AggregateFunctionState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_)
|
||||
: IAggregateFunctionHelper<AggregateFunctionState>(arguments_, params_)
|
||||
, nested_func(nested_), arguments(arguments_), params(params_) {}
|
||||
|
||||
String getName() const override
|
||||
{
|
||||
|
@ -62,10 +62,10 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionSumMapBase(
|
||||
const DataTypePtr & keys_type, const DataTypes & values_types,
|
||||
const DataTypePtr & keys_type_, const DataTypes & values_types_,
|
||||
const DataTypes & argument_types_, const Array & params_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionSumMapData<NearestFieldType<T>>, Derived>(argument_types_, params_)
|
||||
, keys_type(keys_type), values_types(values_types) {}
|
||||
, keys_type(keys_type_), values_types(values_types_) {}
|
||||
|
||||
String getName() const override { return "sumMap"; }
|
||||
|
||||
@ -295,9 +295,9 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionSumMapFiltered(
|
||||
const DataTypePtr & keys_type, const DataTypes & values_types, const Array & keys_to_keep_,
|
||||
const DataTypePtr & keys_type_, const DataTypes & values_types_, const Array & keys_to_keep_,
|
||||
const DataTypes & argument_types_, const Array & params_)
|
||||
: Base{keys_type, values_types, argument_types_, params_}
|
||||
: Base{keys_type_, values_types_, argument_types_, params_}
|
||||
{
|
||||
keys_to_keep.reserve(keys_to_keep_.size());
|
||||
for (const Field & f : keys_to_keep_)
|
||||
|
@ -44,9 +44,9 @@ protected:
|
||||
UInt64 reserved;
|
||||
|
||||
public:
|
||||
AggregateFunctionTopK(UInt64 threshold, UInt64 load_factor, const DataTypes & argument_types_, const Array & params)
|
||||
AggregateFunctionTopK(UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKData<T>, AggregateFunctionTopK<T, is_weighted>>(argument_types_, params)
|
||||
, threshold(threshold), reserved(load_factor * threshold) {}
|
||||
, threshold(threshold_), reserved(load_factor * threshold) {}
|
||||
|
||||
String getName() const override { return is_weighted ? "topKWeighted" : "topK"; }
|
||||
|
||||
@ -139,9 +139,9 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionTopKGeneric(
|
||||
UInt64 threshold, UInt64 load_factor, const DataTypePtr & input_data_type, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKGenericData, AggregateFunctionTopKGeneric<is_plain_column, is_weighted>>({input_data_type}, params)
|
||||
, threshold(threshold), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {}
|
||||
UInt64 threshold_, UInt64 load_factor, const DataTypePtr & input_data_type_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKGenericData, AggregateFunctionTopKGeneric<is_plain_column, is_weighted>>({input_data_type_}, params)
|
||||
, threshold(threshold_), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {}
|
||||
|
||||
String getName() const override { return is_weighted ? "topKWeighted" : "topK"; }
|
||||
|
||||
|
@ -136,9 +136,9 @@ private:
|
||||
UInt8 threshold;
|
||||
|
||||
public:
|
||||
AggregateFunctionUniqUpTo(UInt8 threshold, const DataTypes & argument_types_, const Array & params_)
|
||||
AggregateFunctionUniqUpTo(UInt8 threshold_, const DataTypes & argument_types_, const Array & params_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionUniqUpToData<T>, AggregateFunctionUniqUpTo<T>>(argument_types_, params_)
|
||||
, threshold(threshold)
|
||||
, threshold(threshold_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -196,9 +196,9 @@ private:
|
||||
UInt8 threshold;
|
||||
|
||||
public:
|
||||
AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold)
|
||||
AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionUniqUpToData<UInt64>, AggregateFunctionUniqUpToVariadic<is_exact, argument_is_tuple>>(arguments, params)
|
||||
, threshold(threshold)
|
||||
, threshold(threshold_)
|
||||
{
|
||||
if (argument_is_tuple)
|
||||
num_args = typeid_cast<const DataTypeTuple &>(*arguments[0]).getElements().size();
|
||||
|
@@ -128,6 +128,15 @@
 using AddFunc = void (*)(const IAggregateFunction *, AggregateDataPtr, const IColumn **, size_t, Arena *);
 virtual AddFunc getAddressOfAddFunction() const = 0;

+/** Contains a loop with calls to "add" function. You can collect arguments into array "places"
+  * and do a single call to "addBatch" for devirtualization and inlining.
+  */
+virtual void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const = 0;
+
+/** The same for single place.
+  */
+virtual void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const = 0;
+
 /** This is used for runtime code generation to determine, which header files to include in generated source.
   * Always implement it as
   * const char * getHeaderFilePath() const override { return __FILE__; }
@@ -156,7 +165,20 @@ private:
 public:
 IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_)
     : IAggregateFunction(argument_types_, parameters_) {}

 AddFunc getAddressOfAddFunction() const override { return &addFree; }

+void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const override
+{
+    for (size_t i = 0; i < batch_size; ++i)
+        static_cast<const Derived *>(this)->add(places[i] + place_offset, columns, i, arena);
+}
+
+void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const override
+{
+    for (size_t i = 0; i < batch_size; ++i)
+        static_cast<const Derived *>(this)->add(place, columns, i, arena);
+}
 };
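The comment added above describes the point of addBatch: instead of one virtual add call per row, the helper template owns the batch loop, so the per-row call is made through the statically known derived type and can be inlined. A minimal self-contained sketch of that pattern, with toy names rather than the ClickHouse interfaces:

#include <cstddef>

// Toy interfaces, just enough to show the idea.
struct IAgg
{
    virtual ~IAgg() = default;
    virtual void add(double value) = 0;                                // one virtual call per row
    virtual void addBatch(const double * values, std::size_t n) = 0;  // one virtual call per block
};

// CRTP helper: the loop is instantiated per concrete function, so the per-row call
// goes through the statically known Derived type and can be devirtualized and inlined.
template <typename Derived>
struct AggHelper : IAgg
{
    void addBatch(const double * values, std::size_t n) override
    {
        for (std::size_t i = 0; i < n; ++i)
            static_cast<Derived *>(this)->add(values[i]);
    }
};

struct Sum final : AggHelper<Sum>
{
    double total = 0;
    void add(double value) override { total += value; }
};

// Usage: one virtual dispatch for the whole block instead of one per row, e.g.
//   Sum s; double block[] = {1, 2, 3};
//   static_cast<IAgg &>(s).addBatch(block, 3);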
|
||||
|
||||
|
||||
|
@ -50,9 +50,9 @@ class QuantileTDigest
|
||||
|
||||
Centroid() = default;
|
||||
|
||||
explicit Centroid(Value mean, Count count)
|
||||
: mean(mean)
|
||||
, count(count)
|
||||
explicit Centroid(Value mean_, Count count_)
|
||||
: mean(mean_)
|
||||
, count(count_)
|
||||
{}
|
||||
|
||||
Centroid & operator+=(const Centroid & other)
|
||||
|
@ -53,8 +53,8 @@ class ReservoirSamplerDeterministic
|
||||
}
|
||||
|
||||
public:
|
||||
ReservoirSamplerDeterministic(const size_t sample_count = DEFAULT_SAMPLE_COUNT)
|
||||
: sample_count{sample_count}
|
||||
ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT)
|
||||
: sample_count{sample_count_}
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -13,8 +13,8 @@ namespace ErrorCodes
|
||||
extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
|
||||
}
|
||||
|
||||
ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s)
|
||||
: data(data_), s(s)
|
||||
ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s_)
|
||||
: data(data_), s(s_)
|
||||
{
|
||||
/// Squash Const of Const.
|
||||
while (const ColumnConst * const_data = typeid_cast<const ColumnConst *>(data.get()))
|
||||
|
@ -26,7 +26,7 @@ private:
|
||||
WrappedPtr data;
|
||||
size_t s;
|
||||
|
||||
ColumnConst(const ColumnPtr & data, size_t s);
|
||||
ColumnConst(const ColumnPtr & data, size_t s_);
|
||||
ColumnConst(const ColumnConst & src) = default;
|
||||
|
||||
public:
|
||||
|
@ -13,8 +13,8 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture)
|
||||
: size_(size), function(function)
|
||||
ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture)
|
||||
: size_(size), function(function_)
|
||||
{
|
||||
appendArguments(columns_to_capture);
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ class ColumnFunction final : public COWHelper<IColumn, ColumnFunction>
|
||||
private:
|
||||
friend class COWHelper<IColumn, ColumnFunction>;
|
||||
|
||||
ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture);
|
||||
ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture);
|
||||
|
||||
public:
|
||||
const char * getFamilyName() const override { return "Function"; }
|
||||
|
@ -360,12 +360,12 @@ bool ColumnLowCardinality::containsNull() const
|
||||
|
||||
ColumnLowCardinality::Index::Index() : positions(ColumnUInt8::create()), size_of_type(sizeof(UInt8)) {}
|
||||
|
||||
ColumnLowCardinality::Index::Index(MutableColumnPtr && positions) : positions(std::move(positions))
|
||||
ColumnLowCardinality::Index::Index(MutableColumnPtr && positions_) : positions(std::move(positions_))
|
||||
{
|
||||
updateSizeOfType();
|
||||
}
|
||||
|
||||
ColumnLowCardinality::Index::Index(ColumnPtr positions) : positions(std::move(positions))
|
||||
ColumnLowCardinality::Index::Index(ColumnPtr positions_) : positions(std::move(positions_))
|
||||
{
|
||||
updateSizeOfType();
|
||||
}
|
||||
|
@ -201,8 +201,8 @@ public:
|
||||
public:
|
||||
Index();
|
||||
Index(const Index & other) = default;
|
||||
explicit Index(MutableColumnPtr && positions);
|
||||
explicit Index(ColumnPtr positions);
|
||||
explicit Index(MutableColumnPtr && positions_);
|
||||
explicit Index(ColumnPtr positions_);
|
||||
|
||||
const ColumnPtr & getPositions() const { return positions; }
|
||||
WrappedPtr & getPositionsPtr() { return positions; }
|
||||
|
@ -257,8 +257,8 @@ struct ColumnTuple::Less
|
||||
TupleColumns columns;
|
||||
int nan_direction_hint;
|
||||
|
||||
Less(const TupleColumns & columns, int nan_direction_hint_)
|
||||
: columns(columns), nan_direction_hint(nan_direction_hint_)
|
||||
Less(const TupleColumns & columns_, int nan_direction_hint_)
|
||||
: columns(columns_), nan_direction_hint(nan_direction_hint_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -186,10 +186,10 @@ ColumnUnique<ColumnType>::ColumnUnique(const IDataType & type)
|
||||
}
|
||||
|
||||
template <typename ColumnType>
|
||||
ColumnUnique<ColumnType>::ColumnUnique(MutableColumnPtr && holder, bool is_nullable)
|
||||
ColumnUnique<ColumnType>::ColumnUnique(MutableColumnPtr && holder, bool is_nullable_)
|
||||
: column_holder(std::move(holder))
|
||||
, is_nullable(is_nullable)
|
||||
, index(numSpecialValues(is_nullable), 0)
|
||||
, is_nullable(is_nullable_)
|
||||
, index(numSpecialValues(is_nullable_), 0)
|
||||
{
|
||||
if (column_holder->size() < numSpecialValues())
|
||||
throw Exception("Too small holder column for ColumnUnique.", ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
@ -235,8 +235,8 @@ template <typename IndexType, typename ColumnType>
|
||||
class ReverseIndex
|
||||
{
|
||||
public:
|
||||
explicit ReverseIndex(UInt64 num_prefix_rows_to_skip, UInt64 base_index)
|
||||
: num_prefix_rows_to_skip(num_prefix_rows_to_skip), base_index(base_index), saved_hash_ptr(nullptr) {}
|
||||
explicit ReverseIndex(UInt64 num_prefix_rows_to_skip_, UInt64 base_index_)
|
||||
: num_prefix_rows_to_skip(num_prefix_rows_to_skip_), base_index(base_index_), saved_hash_ptr(nullptr) {}
|
||||
|
||||
void setColumn(ColumnType * column_);
|
||||
|
||||
|
@ -243,11 +243,11 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
throw Exception("Cache wasn't created for HashMethodSingleLowCardinalityColumn",
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
LowCardinalityDictionaryCache * cache;
|
||||
LowCardinalityDictionaryCache * lcd_cache;
|
||||
if constexpr (use_cache)
|
||||
{
|
||||
cache = typeid_cast<LowCardinalityDictionaryCache *>(context.get());
|
||||
if (!cache)
|
||||
lcd_cache = typeid_cast<LowCardinalityDictionaryCache *>(context.get());
|
||||
if (!lcd_cache)
|
||||
{
|
||||
const auto & cached_val = *context;
|
||||
throw Exception("Invalid type for HashMethodSingleLowCardinalityColumn cache: "
|
||||
@ -267,7 +267,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
{
|
||||
dictionary_key = {column->getDictionary().getHash(), dict->size()};
|
||||
if constexpr (use_cache)
|
||||
cached_values = cache->get(dictionary_key);
|
||||
cached_values = lcd_cache->get(dictionary_key);
|
||||
}
|
||||
|
||||
if (cached_values)
|
||||
@ -288,7 +288,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
cached_values->saved_hash = saved_hash;
|
||||
cached_values->dictionary_holder = dictionary_holder;
|
||||
|
||||
cache->set(dictionary_key, cached_values);
|
||||
lcd_cache->set(dictionary_key, cached_values);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -470,8 +470,8 @@ struct HashMethodKeysFixed
|
||||
Sizes key_sizes;
|
||||
size_t keys_size;
|
||||
|
||||
HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes, const HashMethodContextPtr &)
|
||||
: Base(key_columns), key_sizes(std::move(key_sizes)), keys_size(key_columns.size())
|
||||
HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes_, const HashMethodContextPtr &)
|
||||
: Base(key_columns), key_sizes(std::move(key_sizes_)), keys_size(key_columns.size())
|
||||
{
|
||||
if constexpr (has_low_cardinality)
|
||||
{
|
||||
@ -525,8 +525,8 @@ struct HashMethodSerialized
|
||||
ColumnRawPtrs key_columns;
|
||||
size_t keys_size;
|
||||
|
||||
HashMethodSerialized(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &)
|
||||
: key_columns(key_columns), keys_size(key_columns.size()) {}
|
||||
HashMethodSerialized(const ColumnRawPtrs & key_columns_, const Sizes & /*key_sizes*/, const HashMethodContextPtr &)
|
||||
: key_columns(key_columns_), keys_size(key_columns_.size()) {}
|
||||
|
||||
protected:
|
||||
friend class columns_hashing_impl::HashMethodBase<Self, Value, Mapped, false>;
|
||||
@ -550,8 +550,8 @@ struct HashMethodHashed
|
||||
|
||||
ColumnRawPtrs key_columns;
|
||||
|
||||
HashMethodHashed(ColumnRawPtrs key_columns, const Sizes &, const HashMethodContextPtr &)
|
||||
: key_columns(std::move(key_columns)) {}
|
||||
HashMethodHashed(ColumnRawPtrs key_columns_, const Sizes &, const HashMethodContextPtr &)
|
||||
: key_columns(std::move(key_columns_)) {}
|
||||
|
||||
ALWAYS_INLINE Key getKey(size_t row, Arena &) const { return hash128(row, key_columns.size(), key_columns); }
|
||||
|
||||
|
@ -56,8 +56,8 @@ class EmplaceResultImpl
|
||||
bool inserted;
|
||||
|
||||
public:
|
||||
EmplaceResultImpl(Mapped & value, Mapped & cached_value, bool inserted)
|
||||
: value(value), cached_value(cached_value), inserted(inserted) {}
|
||||
EmplaceResultImpl(Mapped & value_, Mapped & cached_value_, bool inserted_)
|
||||
: value(value_), cached_value(cached_value_), inserted(inserted_) {}
|
||||
|
||||
bool isInserted() const { return inserted; }
|
||||
auto & getMapped() const { return value; }
|
||||
@ -75,7 +75,7 @@ class EmplaceResultImpl<void>
|
||||
bool inserted;
|
||||
|
||||
public:
|
||||
explicit EmplaceResultImpl(bool inserted) : inserted(inserted) {}
|
||||
explicit EmplaceResultImpl(bool inserted_) : inserted(inserted_) {}
|
||||
bool isInserted() const { return inserted; }
|
||||
};
|
||||
|
||||
@ -86,7 +86,7 @@ class FindResultImpl
|
||||
bool found;
|
||||
|
||||
public:
|
||||
FindResultImpl(Mapped * value, bool found) : value(value), found(found) {}
|
||||
FindResultImpl(Mapped * value_, bool found_) : value(value_), found(found_) {}
|
||||
bool isFound() const { return found; }
|
||||
Mapped & getMapped() const { return *value; }
|
||||
};
|
||||
@ -97,7 +97,7 @@ class FindResultImpl<void>
|
||||
bool found;
|
||||
|
||||
public:
|
||||
explicit FindResultImpl(bool found) : found(found) {}
|
||||
explicit FindResultImpl(bool found_) : found(found_) {}
|
||||
bool isFound() const { return found; }
|
||||
};
|
||||
|
||||
|
@ -59,15 +59,15 @@ namespace CurrentMetrics
|
||||
std::atomic<Value> * what;
|
||||
Value amount;
|
||||
|
||||
Increment(std::atomic<Value> * what, Value amount)
|
||||
: what(what), amount(amount)
|
||||
Increment(std::atomic<Value> * what_, Value amount_)
|
||||
: what(what_), amount(amount_)
|
||||
{
|
||||
*what += amount;
|
||||
}
|
||||
|
||||
public:
|
||||
Increment(Metric metric, Value amount = 1)
|
||||
: Increment(&values[metric], amount) {}
|
||||
Increment(Metric metric, Value amount_ = 1)
|
||||
: Increment(&values[metric], amount_) {}
|
||||
|
||||
~Increment()
|
||||
{
|
||||
|
@ -55,8 +55,8 @@ Elf::Elf(const std::string & path)
|
||||
}
|
||||
|
||||
|
||||
Elf::Section::Section(const ElfShdr & header, const Elf & elf)
|
||||
: header(header), elf(elf)
|
||||
Elf::Section::Section(const ElfShdr & header_, const Elf & elf_)
|
||||
: header(header_), elf(elf_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -35,7 +35,7 @@ public:
|
||||
const char * end() const;
|
||||
size_t size() const;
|
||||
|
||||
Section(const ElfShdr & header, const Elf & elf);
|
||||
Section(const ElfShdr & header_, const Elf & elf_);
|
||||
|
||||
private:
|
||||
const Elf & elf;
|
||||
|
@ -442,6 +442,7 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_PARSE_DWARF = 465;
|
||||
extern const int INSECURE_PATH = 466;
|
||||
extern const int CANNOT_PARSE_BOOL = 467;
|
||||
extern const int CANNOT_PTHREAD_ATTR = 468;
|
||||
|
||||
extern const int KEEPER_EXCEPTION = 999;
|
||||
extern const int POCO_EXCEPTION = 1000;
|
||||
|
@ -167,7 +167,7 @@ String FieldVisitorToString::operator() (const Tuple & x_def) const
|
||||
}
|
||||
|
||||
|
||||
FieldVisitorHash::FieldVisitorHash(SipHash & hash) : hash(hash) {}
|
||||
FieldVisitorHash::FieldVisitorHash(SipHash & hash_) : hash(hash_) {}
|
||||
|
||||
void FieldVisitorHash::operator() (const Null &) const
|
||||
{
|
||||
|
@ -222,7 +222,7 @@ class FieldVisitorHash : public StaticVisitor<>
|
||||
private:
|
||||
SipHash & hash;
|
||||
public:
|
||||
FieldVisitorHash(SipHash & hash);
|
||||
FieldVisitorHash(SipHash & hash_);
|
||||
|
||||
void operator() (const Null & x) const;
|
||||
void operator() (const UInt64 & x) const;
|
||||
|
@ -31,9 +31,9 @@ class MemoryTracker
|
||||
const char * description = nullptr;
|
||||
|
||||
public:
|
||||
MemoryTracker(VariableContext level = VariableContext::Thread) : level(level) {}
|
||||
MemoryTracker(Int64 limit_, VariableContext level = VariableContext::Thread) : limit(limit_), level(level) {}
|
||||
MemoryTracker(MemoryTracker * parent_, VariableContext level = VariableContext::Thread) : parent(parent_), level(level) {}
|
||||
MemoryTracker(VariableContext level_ = VariableContext::Thread) : level(level_) {}
|
||||
MemoryTracker(Int64 limit_, VariableContext level_ = VariableContext::Thread) : limit(limit_), level(level_) {}
|
||||
MemoryTracker(MemoryTracker * parent_, VariableContext level_ = VariableContext::Thread) : parent(parent_), level(level_) {}
|
||||
|
||||
~MemoryTracker();
|
||||
|
||||
|
@ -191,10 +191,10 @@ Counters global_counters(global_counters_array);
|
||||
const Event Counters::num_counters = END;
|
||||
|
||||
|
||||
Counters::Counters(VariableContext level, Counters * parent)
|
||||
Counters::Counters(VariableContext level_, Counters * parent_)
|
||||
: counters_holder(new Counter[num_counters] {}),
|
||||
parent(parent),
|
||||
level(level)
|
||||
parent(parent_),
|
||||
level(level_)
|
||||
{
|
||||
counters = counters_holder.get();
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ namespace ProfileEvents
|
||||
VariableContext level = VariableContext::Thread;
|
||||
|
||||
/// By default, any instance have to increment global counters
|
||||
Counters(VariableContext level = VariableContext::Thread, Counters * parent = &global_counters);
|
||||
Counters(VariableContext level_ = VariableContext::Thread, Counters * parent_ = &global_counters);
|
||||
|
||||
/// Global level static initializer
|
||||
Counters(Counter * allocated_counters)
|
||||
|
@ -127,9 +127,9 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
template <typename ProfilerImpl>
|
||||
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal)
|
||||
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_)
|
||||
: log(&Logger::get("QueryProfiler"))
|
||||
, pause_signal(pause_signal)
|
||||
, pause_signal(pause_signal_)
|
||||
{
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY
|
||||
/// Sanity check.
|
||||
|
@ -35,7 +35,7 @@ template <typename ProfilerImpl>
|
||||
class QueryProfilerBase
|
||||
{
|
||||
public:
|
||||
QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal);
|
||||
QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_);
|
||||
~QueryProfilerBase();
|
||||
|
||||
private:
|
||||
|
@ -161,9 +161,9 @@ RWLockImpl::LockHolderImpl::~LockHolderImpl()
|
||||
}
|
||||
|
||||
|
||||
RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent, RWLockImpl::GroupsContainer::iterator it_group,
|
||||
RWLockImpl::ClientsContainer::iterator it_client)
|
||||
: parent{std::move(parent)}, it_group{it_group}, it_client{it_client},
|
||||
RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent_, RWLockImpl::GroupsContainer::iterator it_group_,
|
||||
RWLockImpl::ClientsContainer::iterator it_client_)
|
||||
: parent{std::move(parent_)}, it_group{it_group_}, it_client{it_client_},
|
||||
active_client_increment{(*it_client == RWLockImpl::Read) ? CurrentMetrics::RWLockActiveReaders
|
||||
: CurrentMetrics::RWLockActiveWriters}
|
||||
{}
|
||||
|
@ -68,7 +68,7 @@ private:
|
||||
|
||||
std::condition_variable cv; /// all clients of the group wait group condvar
|
||||
|
||||
explicit Group(Type type) : type{type} {}
|
||||
explicit Group(Type type_) : type{type_} {}
|
||||
};
|
||||
|
||||
mutable std::mutex mutex;
|
||||
|
@ -34,13 +34,13 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_CREATE_CHILD_PROCESS;
|
||||
}
|
||||
|
||||
ShellCommand::ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_)
|
||||
: pid(pid)
|
||||
ShellCommand::ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_)
|
||||
: pid(pid_)
|
||||
, terminate_in_destructor(terminate_in_destructor_)
|
||||
, log(&Poco::Logger::get("ShellCommand"))
|
||||
, in(in_fd)
|
||||
, out(out_fd)
|
||||
, err(err_fd) {}
|
||||
, in(in_fd_)
|
||||
, out(out_fd_)
|
||||
, err(err_fd_) {}
|
||||
|
||||
ShellCommand::~ShellCommand()
|
||||
{
|
||||
|
@ -32,7 +32,7 @@ private:
|
||||
|
||||
Poco::Logger * log;
|
||||
|
||||
ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_);
|
||||
ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_);
|
||||
|
||||
static std::unique_ptr<ShellCommand> executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor);
|
||||
|
||||
|

@ -113,7 +113,8 @@ public:
}

TKey key;
size_t slot, hash;
size_t slot;
size_t hash;
UInt64 count;
UInt64 error;
};

@ -147,15 +148,13 @@ public:
void insert(const TKey & key, UInt64 increment = 1, UInt64 error = 0)
{
// Increase weight of a key that already exists
// It uses hashtable for both value mapping as a presence test (c_i != 0)
auto hash = counter_map.hash(key);
auto it = counter_map.find(key, hash);
if (it != counter_map.end())
auto counter = findCounter(key, hash);
if (counter)
{
auto c = it->getSecond();
c->count += increment;
c->error += error;
percolate(c);
counter->count += increment;
counter->error += error;
percolate(counter);
return;
}
// Key doesn't exist, but can fit in the top K

@ -177,6 +176,7 @@ public:
push(new Counter(arena.emplace(key), increment, error, hash));
return;
}

const size_t alpha_mask = alpha_map.size() - 1;
auto & alpha = alpha_map[hash & alpha_mask];
if (alpha + increment < min->count)

@ -187,22 +187,9 @@ public:

// Erase the current minimum element
alpha_map[min->hash & alpha_mask] = min->count;
it = counter_map.find(min->key, min->hash);
destroyLastElement();

// Replace minimum with newly inserted element
if (it != counter_map.end())
{
arena.free(min->key);
min->hash = hash;
min->key = arena.emplace(key);
min->count = alpha + increment;
min->error = alpha + error;
percolate(min);

it->getSecond() = min;
it->getFirstMutable() = min->key;
counter_map.reinsert(it, hash);
}
push(new Counter(arena.emplace(key), alpha + increment, alpha + error, hash));
}

/*

@ -242,17 +229,35 @@ public:
// The list is sorted in descending order, we have to scan in reverse
for (auto counter : boost::adaptors::reverse(rhs.counter_list))
{
if (counter_map.find(counter->key) != counter_map.end())
size_t hash = counter_map.hash(counter->key);
if (auto current = findCounter(counter->key, hash))
{
// Subtract m2 previously added, guaranteed not negative
insert(counter->key, counter->count - m2, counter->error - m2);
current->count += (counter->count - m2);
current->error += (counter->error - m2);
}
else
{
// Counters not monitored in S1
insert(counter->key, counter->count + m1, counter->error + m1);
counter_list.push_back(new Counter(arena.emplace(counter->key), counter->count + m1, counter->error + m1, hash));
}
}

std::sort(counter_list.begin(), counter_list.end(), [](Counter * l, Counter * r) { return *l > *r; });

if (counter_list.size() > m_capacity)
{
for (size_t i = m_capacity; i < counter_list.size(); ++i)
{
arena.free(counter_list[i]->key);
delete counter_list[i];
}
counter_list.resize(m_capacity);
}

for (size_t i = 0; i < counter_list.size(); ++i)
counter_list[i]->slot = i;
rebuildCounterMap();
}

std::vector<Counter> topK(size_t k) const

@ -336,7 +341,10 @@ private:
void destroyElements()
{
for (auto counter : counter_list)
{
arena.free(counter->key);
delete counter;
}

counter_map.clear();
counter_list.clear();

@ -346,19 +354,40 @@ private:
void destroyLastElement()
{
auto last_element = counter_list.back();
auto cell = counter_map.find(last_element->key, last_element->hash);
cell->setZero();
counter_map.reinsert(cell, last_element->hash);
counter_list.pop_back();
arena.free(last_element->key);
delete last_element;
counter_list.pop_back();

++removed_keys;
if (removed_keys * 2 > counter_map.size())
rebuildCounterMap();
}

HashMap<TKey, Counter *, Hash, Grower, Allocator> counter_map;
Counter * findCounter(const TKey & key, size_t hash)
{
auto it = counter_map.find(key, hash);
if (it == counter_map.end())
return nullptr;

return it->getSecond();
}

void rebuildCounterMap()
{
removed_keys = 0;
counter_map.clear();
for (auto counter : counter_list)
counter_map[counter->key] = counter;
}

using CounterMap = HashMap<TKey, Counter *, Hash, Grower, Allocator>;

CounterMap counter_map;
std::vector<Counter *> counter_list;
std::vector<UInt64> alpha_map;
SpaceSavingArena<TKey> arena;
size_t m_capacity;
size_t removed_keys = 0;
};

}
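The class changed above is ClickHouse's SpaceSaving top-K structure: this diff factors the hash-map lookup into findCounter(), makes eviction lazy (destroyLastElement() now just bumps removed_keys and periodically calls rebuildCounterMap()), and merges counters in place instead of re-inserting them. Stripped of the arena, heap and HashMap plumbing, the core Space-Saving update rule looks roughly like the standalone sketch below; the std::map-based NaiveSpaceSaving is a toy stand-in, not the class in the diff.

#include <cstdint>
#include <string>
#include <map>
#include <algorithm>

/// Space-Saving with capacity k: every observed key gets a counter; once k counters
/// exist, a new key evicts the current minimum and inherits its count as an error bound.
struct NaiveSpaceSaving
{
    size_t capacity;
    std::map<std::string, std::pair<uint64_t, uint64_t>> counters;  /// key -> (count, error)

    void insert(const std::string & key)
    {
        auto it = counters.find(key);
        if (it != counters.end())
        {
            ++it->second.first;     /// key already tracked: just bump its count
            return;
        }
        if (counters.size() < capacity)
        {
            counters.emplace(key, std::make_pair(uint64_t(1), uint64_t(0)));
            return;
        }
        /// Structure is full: evict the minimum and reuse its count as the new key's error.
        auto min_it = std::min_element(counters.begin(), counters.end(),
            [](const auto & l, const auto & r) { return l.second.first < r.second.first; });
        uint64_t min_count = min_it->second.first;
        counters.erase(min_it);
        counters.emplace(key, std::make_pair(min_count + 1, min_count));
    }
};

The real class keeps the counters in a heap-ordered counter_list (percolate/push) plus a counter_map for O(1) lookup, which is exactly the invariant findCounter() and rebuildCounterMap() maintain.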
@ -86,7 +86,7 @@ public:
|
||||
|
||||
operator bool() const { return parent != nullptr; }
|
||||
|
||||
Lock(AtomicStopwatch * parent) : parent(parent) {}
|
||||
Lock(AtomicStopwatch * parent_) : parent(parent_) {}
|
||||
|
||||
Lock(Lock &&) = default;
|
||||
|
||||
|
@ -75,8 +75,8 @@ private:
|
||||
#endif
|
||||
|
||||
public:
|
||||
StringSearcher(const char * const needle_, const size_t needle_size)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}, needle_size{needle_size}
|
||||
StringSearcher(const char * const needle_, const size_t needle_size_)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}, needle_size{needle_size_}
|
||||
{
|
||||
if (0 == needle_size)
|
||||
return;
|
||||
@ -714,8 +714,8 @@ struct LibCASCIICaseSensitiveStringSearcher
|
||||
{
|
||||
const char * const needle;
|
||||
|
||||
LibCASCIICaseSensitiveStringSearcher(const char * const needle, const size_t /* needle_size */)
|
||||
: needle(needle) {}
|
||||
LibCASCIICaseSensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */)
|
||||
: needle(needle_) {}
|
||||
|
||||
const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const
|
||||
{
|
||||
@ -735,8 +735,8 @@ struct LibCASCIICaseInsensitiveStringSearcher
|
||||
{
|
||||
const char * const needle;
|
||||
|
||||
LibCASCIICaseInsensitiveStringSearcher(const char * const needle, const size_t /* needle_size */)
|
||||
: needle(needle) {}
|
||||
LibCASCIICaseInsensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */)
|
||||
: needle(needle_) {}
|
||||
|
||||
const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const
|
||||
{
|
||||
|
@ -22,14 +22,14 @@ namespace CurrentMetrics
|
||||
|
||||
|
||||
template <typename Thread>
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads)
|
||||
: ThreadPoolImpl(max_threads, max_threads, max_threads)
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads_)
|
||||
: ThreadPoolImpl(max_threads_, max_threads_, max_threads_)
|
||||
{
|
||||
}
|
||||
|
||||
template <typename Thread>
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
: max_threads(max_threads), max_free_threads(max_free_threads), queue_size(queue_size)
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_)
|
||||
: max_threads(max_threads_), max_free_threads(max_free_threads_), queue_size(queue_size_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -31,10 +31,10 @@ public:
|
||||
using Job = std::function<void()>;
|
||||
|
||||
/// Size is constant. Up to num_threads are created on demand and then run until shutdown.
|
||||
explicit ThreadPoolImpl(size_t max_threads);
|
||||
explicit ThreadPoolImpl(size_t max_threads_);
|
||||
|
||||
/// queue_size - maximum number of running plus scheduled jobs. It can be greater than max_threads. Zero means unlimited.
|
||||
ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_);
|
||||
|
||||
/// Add new job. Locks until number of scheduled jobs is less than maximum or exception in one of threads was thrown.
|
||||
/// If an exception in some thread was thrown, method silently returns, and exception will be rethrown only on call to 'wait' function.
|
||||
@ -81,8 +81,8 @@ private:
|
||||
Job job;
|
||||
int priority;
|
||||
|
||||
JobWithPriority(Job job, int priority)
|
||||
: job(job), priority(priority) {}
|
||||
JobWithPriority(Job job_, int priority_)
|
||||
: job(job_), priority(priority_) {}
|
||||
|
||||
bool operator< (const JobWithPriority & rhs) const
|
||||
{
|
||||
|
@ -36,12 +36,12 @@ namespace ErrorCodes
|
||||
class Throttler
|
||||
{
|
||||
public:
|
||||
Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent = nullptr)
|
||||
: max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent) {}
|
||||
Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {}
|
||||
|
||||
Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_,
|
||||
const std::shared_ptr<Throttler> & parent = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent) {}
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {}
|
||||
|
||||
void add(const size_t amount)
|
||||
{
|
||||
|
@ -28,9 +28,9 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_FCNTL;
|
||||
}
|
||||
|
||||
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log)
|
||||
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log_)
|
||||
: log(&Poco::Logger::get("TraceCollector"))
|
||||
, trace_log(trace_log)
|
||||
, trace_log(trace_log_)
|
||||
{
|
||||
if (trace_log == nullptr)
|
||||
throw Exception("Invalid trace log pointer passed", ErrorCodes::NULL_POINTER_DEREFERENCE);
|
||||
|
@ -24,7 +24,7 @@ private:
|
||||
static void notifyToStop();
|
||||
|
||||
public:
|
||||
TraceCollector(std::shared_ptr<TraceLog> & trace_log);
|
||||
TraceCollector(std::shared_ptr<TraceLog> & trace_log_);
|
||||
|
||||
~TraceCollector();
|
||||
};
|
||||
|
@ -28,7 +28,7 @@ struct UInt128
|
||||
UInt64 high;
|
||||
|
||||
UInt128() = default;
|
||||
explicit UInt128(const UInt64 low, const UInt64 high) : low(low), high(high) {}
|
||||
explicit UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) {}
|
||||
explicit UInt128(const UInt64 rhs) : low(rhs), high() {}
|
||||
|
||||
auto tuple() const { return std::tie(high, low); }
|
||||
|
@ -331,11 +331,11 @@ public:
|
||||
* If you specify it small enough, the fallback algorithm will be used,
|
||||
* since it is considered that it's useless to waste time initializing the hash table.
|
||||
*/
|
||||
VolnitskyBase(const char * const needle, const size_t needle_size, size_t haystack_size_hint = 0)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle)}
|
||||
, needle_size{needle_size}
|
||||
VolnitskyBase(const char * const needle_, const size_t needle_size_, size_t haystack_size_hint = 0)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}
|
||||
, needle_size{needle_size_}
|
||||
, fallback{VolnitskyTraits::isFallbackNeedle(needle_size, haystack_size_hint)}
|
||||
, fallback_searcher{needle, needle_size}
|
||||
, fallback_searcher{needle_, needle_size}
|
||||
{
|
||||
if (fallback)
|
||||
return;
|
||||
|
@ -23,8 +23,8 @@ namespace ProfileEvents
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
Exception::Exception(const std::string & msg, const int32_t code, int)
|
||||
: DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code)
|
||||
Exception::Exception(const std::string & msg, const int32_t code_, int)
|
||||
: DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code_)
|
||||
{
|
||||
if (Coordination::isUserError(code))
|
||||
ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions);
|
||||
@ -34,18 +34,18 @@ Exception::Exception(const std::string & msg, const int32_t code, int)
|
||||
ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions);
|
||||
}
|
||||
|
||||
Exception::Exception(const std::string & msg, const int32_t code)
|
||||
: Exception(msg + " (" + errorMessage(code) + ")", code, 0)
|
||||
Exception::Exception(const std::string & msg, const int32_t code_)
|
||||
: Exception(msg + " (" + errorMessage(code_) + ")", code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
Exception::Exception(const int32_t code)
|
||||
: Exception(errorMessage(code), code, 0)
|
||||
Exception::Exception(const int32_t code_)
|
||||
: Exception(errorMessage(code_), code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
Exception::Exception(const int32_t code, const std::string & path)
|
||||
: Exception(std::string{errorMessage(code)} + ", path: " + path, code, 0)
|
||||
Exception::Exception(const int32_t code_, const std::string & path)
|
||||
: Exception(std::string{errorMessage(code_)} + ", path: " + path, code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -301,12 +301,12 @@ class Exception : public DB::Exception
|
||||
{
|
||||
private:
|
||||
/// Delegate constructor, used to minimize repetition; last parameter used for overload resolution.
|
||||
Exception(const std::string & msg, const int32_t code, int);
|
||||
Exception(const std::string & msg, const int32_t code_, int);
|
||||
|
||||
public:
|
||||
explicit Exception(const int32_t code);
|
||||
Exception(const std::string & msg, const int32_t code);
|
||||
Exception(const int32_t code, const std::string & path);
|
||||
explicit Exception(const int32_t code_);
|
||||
Exception(const std::string & msg, const int32_t code_);
|
||||
Exception(const int32_t code_, const std::string & path);
|
||||
Exception(const Exception & exc);
|
||||
|
||||
const char * name() const throw() override { return "Coordination::Exception"; }
|
||||
|
@ -418,8 +418,8 @@ ResponsePtr TestKeeperCheckRequest::createResponse() const { return std::make_sh
|
||||
ResponsePtr TestKeeperMultiRequest::createResponse() const { return std::make_shared<MultiResponse>(); }
|
||||
|
||||
|
||||
TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout)
|
||||
: root_path(root_path_), operation_timeout(operation_timeout)
|
||||
TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_)
|
||||
: root_path(root_path_), operation_timeout(operation_timeout_)
|
||||
{
|
||||
container.emplace("/", Node());
|
||||
|
||||
|
@ -33,7 +33,7 @@ using TestKeeperRequestPtr = std::shared_ptr<TestKeeperRequest>;
|
||||
class TestKeeper : public IKeeper
|
||||
{
|
||||
public:
|
||||
TestKeeper(const String & root_path, Poco::Timespan operation_timeout);
|
||||
TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_);
|
||||
~TestKeeper() override;
|
||||
|
||||
bool isExpired() const override { return expired; }
|
||||
|
@ -106,10 +106,10 @@ void ZooKeeper::init(const std::string & implementation, const std::string & hos
|
||||
throw KeeperException("Zookeeper root doesn't exist. You should create root node " + chroot + " before start.", Coordination::ZNONODE);
|
||||
}
|
||||
|
||||
ZooKeeper::ZooKeeper(const std::string & hosts, const std::string & identity, int32_t session_timeout_ms,
|
||||
int32_t operation_timeout_ms, const std::string & chroot, const std::string & implementation)
|
||||
ZooKeeper::ZooKeeper(const std::string & hosts_, const std::string & identity_, int32_t session_timeout_ms_,
|
||||
int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation)
|
||||
{
|
||||
init(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot);
|
||||
init(implementation, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_);
|
||||
}
|
||||
|
||||
struct ZooKeeperArgs
|
||||
@ -891,9 +891,9 @@ size_t KeeperMultiException::getFailedOpIndex(int32_t exception_code, const Coor
|
||||
}
|
||||
|
||||
|
||||
KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests, const Coordination::Responses & responses)
|
||||
KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
|
||||
: KeeperException("Transaction failed", exception_code),
|
||||
requests(requests), responses(responses), failed_op_index(getFailedOpIndex(exception_code, responses))
|
||||
requests(requests_), responses(responses_), failed_op_index(getFailedOpIndex(exception_code, responses))
|
||||
{
|
||||
addMessage("Op #" + std::to_string(failed_op_index) + ", path: " + getPathForFirstFailedOp());
|
||||
}
|
||||
|
@ -52,10 +52,10 @@ class ZooKeeper
|
||||
public:
|
||||
using Ptr = std::shared_ptr<ZooKeeper>;
|
||||
|
||||
ZooKeeper(const std::string & hosts, const std::string & identity = "",
|
||||
int32_t session_timeout_ms = DEFAULT_SESSION_TIMEOUT,
|
||||
int32_t operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT,
|
||||
const std::string & chroot = "",
|
||||
ZooKeeper(const std::string & hosts_, const std::string & identity_ = "",
|
||||
int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT,
|
||||
int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT,
|
||||
const std::string & chroot_ = "",
|
||||
const std::string & implementation = "zookeeper");
|
||||
|
||||
/** Config of the form:
|
||||
|
@ -758,17 +758,17 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
||||
{
|
||||
ZooKeeper::OpNum op_num;
|
||||
bool done;
|
||||
int32_t error;
|
||||
int32_t error_;
|
||||
|
||||
Coordination::read(op_num, in);
|
||||
Coordination::read(done, in);
|
||||
Coordination::read(error, in);
|
||||
Coordination::read(error_, in);
|
||||
|
||||
if (!done)
|
||||
throw Exception("Too many results received for multi transaction", ZMARSHALLINGERROR);
|
||||
if (op_num != -1)
|
||||
throw Exception("Unexpected op_num received at the end of results for multi transaction", ZMARSHALLINGERROR);
|
||||
if (error != -1)
|
||||
if (error_ != -1)
|
||||
throw Exception("Unexpected error value received at the end of results for multi transaction", ZMARSHALLINGERROR);
|
||||
}
|
||||
}
|
||||
@ -821,12 +821,12 @@ ZooKeeper::ZooKeeper(
|
||||
const String & root_path_,
|
||||
const String & auth_scheme,
|
||||
const String & auth_data,
|
||||
Poco::Timespan session_timeout,
|
||||
Poco::Timespan session_timeout_,
|
||||
Poco::Timespan connection_timeout,
|
||||
Poco::Timespan operation_timeout)
|
||||
Poco::Timespan operation_timeout_)
|
||||
: root_path(root_path_),
|
||||
session_timeout(session_timeout),
|
||||
operation_timeout(std::min(operation_timeout, session_timeout))
|
||||
session_timeout(session_timeout_),
|
||||
operation_timeout(std::min(operation_timeout_, session_timeout_))
|
||||
{
|
||||
if (!root_path.empty())
|
||||
{
|
||||
|
@ -108,9 +108,9 @@ public:
|
||||
const String & root_path,
|
||||
const String & auth_scheme,
|
||||
const String & auth_data,
|
||||
Poco::Timespan session_timeout,
|
||||
Poco::Timespan session_timeout_,
|
||||
Poco::Timespan connection_timeout,
|
||||
Poco::Timespan operation_timeout);
|
||||
Poco::Timespan operation_timeout_);
|
||||
|
||||
~ZooKeeper() override;
|
||||
|
||||
|
dbms/src/Common/checkStackSize.cpp (new file)
@ -0,0 +1,62 @@
#include <Common/checkStackSize.h>
#include <Common/Exception.h>
#include <ext/scope_guard.h>

#include <pthread.h>
#include <cstdint>
#include <sstream>


namespace DB
{
namespace ErrorCodes
{
    extern const int CANNOT_PTHREAD_ATTR;
    extern const int LOGICAL_ERROR;
    extern const int TOO_DEEP_RECURSION;
}
}


static thread_local void * stack_address = nullptr;
static thread_local size_t max_stack_size = 0;

void checkStackSize()
{
    using namespace DB;

    if (!stack_address)
    {
        pthread_attr_t attr;
        if (0 != pthread_getattr_np(pthread_self(), &attr))
            throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);

        SCOPE_EXIT({ pthread_attr_destroy(&attr); });

        if (0 != pthread_attr_getstack(&attr, &stack_address, &max_stack_size))
            throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
    }

    const void * frame_address = __builtin_frame_address(0);
    uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
    uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);

    /// We assume that stack grows towards lower addresses. And that it starts to grow from the end of a chunk of memory of max_stack_size.
    if (int_frame_address > int_stack_address + max_stack_size)
        throw Exception("Logical error: frame address is greater than stack begin address", ErrorCodes::LOGICAL_ERROR);

    size_t stack_size = int_stack_address + max_stack_size - int_frame_address;

    /// Just check if we have already eat more than a half of stack size. It's a bit overkill (a half of stack size is wasted).
    /// It's safe to assume that overflow in multiplying by two cannot occur.
    if (stack_size * 2 > max_stack_size)
    {
        std::stringstream message;
        message << "Stack size too large"
            << ". Stack address: " << stack_address
            << ", frame address: " << frame_address
            << ", stack size: " << stack_size
            << ", maximum stack size: " << max_stack_size;
        throw Exception(message.str(), ErrorCodes::TOO_DEEP_RECURSION);
    }
}
dbms/src/Common/checkStackSize.h (new file)
@ -0,0 +1,7 @@
#pragma once

/** If the stack is large enough and is near its size, throw an exception.
  * You can call this function in "heavy" functions that may be called recursively
  * to prevent possible stack overflows.
  */
void checkStackSize();
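A minimal usage sketch of the new helper; the recursive-descent routine below is hypothetical and only illustrates where the check is meant to sit.

#include <Common/checkStackSize.h>

/// Hypothetical parser for nested parentheses: each nesting level consumes stack,
/// so the check runs on entry and throws TOO_DEEP_RECURSION instead of crashing
/// on pathologically deep input.
static size_t parseNested(const char * & pos)
{
    checkStackSize();

    if (*pos != '(')
        return 0;

    ++pos;                              /// consume '('
    size_t depth = 1 + parseNested(pos);
    if (*pos == ')')
        ++pos;                          /// consume ')'
    return depth;
}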
dbms/src/Common/getExecutablePath.cpp (new file)
@ -0,0 +1,13 @@
#include <Common/getExecutablePath.h>
#include <filesystem>


std::string getExecutablePath()
{
    std::error_code ec;
    std::filesystem::path canonical_path = std::filesystem::canonical("/proc/self/exe", ec);

    if (ec)
        return {};
    return canonical_path;
}
dbms/src/Common/getExecutablePath.h (new file)
@ -0,0 +1,11 @@
#pragma once

#include <string>

/** Get path to the running executable if possible.
  * It is possible when:
  * - procfs exists;
  * - there is a /proc/self/exe file;
  * Otherwise return empty string.
  */
std::string getExecutablePath();
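A minimal usage sketch; the log messages are illustrative only.

#include <Common/getExecutablePath.h>
#include <iostream>

int main()
{
    std::string path = getExecutablePath();   /// empty when /proc/self/exe cannot be resolved
    if (path.empty())
        std::cerr << "procfs is not available, executable path is unknown\n";
    else
        std::cout << "running binary: " << path << "\n";
    return 0;
}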
@ -42,7 +42,7 @@ inline void writeHexByteLowercase(UInt8 byte, void * out)
|
||||
|
||||
/// Produces hex representation of an unsigned int with leading zeros (for checksums)
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table)
|
||||
{
|
||||
union
|
||||
{
|
||||
@ -50,7 +50,7 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
UInt8 uint8[sizeof(TUInt)];
|
||||
};
|
||||
|
||||
value = uint;
|
||||
value = uint_;
|
||||
|
||||
/// Use little endian
|
||||
for (size_t i = 0; i < sizeof(TUInt); ++i)
|
||||
@ -58,30 +58,30 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntUppercase(TUInt uint, char * out)
|
||||
inline void writeHexUIntUppercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint, out, hex_byte_to_char_uppercase_table);
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntLowercase(TUInt uint, char * out)
|
||||
inline void writeHexUIntLowercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint, out, hex_byte_to_char_lowercase_table);
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntUppercase(TUInt uint)
|
||||
std::string getHexUIntUppercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntUppercase(uint, res.data());
|
||||
writeHexUIntUppercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntLowercase(TUInt uint)
|
||||
std::string getHexUIntLowercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntLowercase(uint, res.data());
|
||||
writeHexUIntLowercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
|
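The helpers renamed above format an unsigned integer as fixed-width hex with leading zeros, as their doc comment notes ("for checksums"). A small usage sketch follows; the include path is an assumption, since this hunk does not show the file name.

#include <Common/hex.h>   /// assumed location of these helpers
#include <cstdint>
#include <iostream>

int main()
{
    uint32_t checksum = 0x00ABCDEF;
    /// Width is always 2 * sizeof(checksum), so the leading zero byte is kept: "00abcdef".
    std::cout << getHexUIntLowercase(checksum) << "\n";
    return 0;
}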
@ -137,17 +137,17 @@ struct Dictionary
|
||||
|
||||
enum class AttributeUnderlyingType
|
||||
{
|
||||
UInt8,
|
||||
UInt16,
|
||||
UInt32,
|
||||
UInt64,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Float32,
|
||||
Float64,
|
||||
String
|
||||
utUInt8,
|
||||
utUInt16,
|
||||
utUInt32,
|
||||
utUInt64,
|
||||
utInt8,
|
||||
utInt16,
|
||||
utInt32,
|
||||
utInt64,
|
||||
utFloat32,
|
||||
utFloat64,
|
||||
utString
|
||||
};
|
||||
|
||||
struct Attribute final
|
||||
@ -172,17 +172,17 @@ struct Dictionary
|
||||
{
|
||||
switch (attribute.type)
|
||||
{
|
||||
case AttributeUnderlyingType::UInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::Int8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Float32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::Float64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::String:
|
||||
case AttributeUnderlyingType::utUInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utInt8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utFloat32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::utFloat64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::utString:
|
||||
{
|
||||
const auto & string = value.get<String>();
|
||||
auto & string_ref = std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
|
||||
@ -308,7 +308,7 @@ int main(int argc, char ** argv)
|
||||
constexpr size_t cache_size = 1024;
|
||||
|
||||
Dictionary::Attribute attr;
|
||||
attr.type = Dictionary::AttributeUnderlyingType::String;
|
||||
attr.type = Dictionary::AttributeUnderlyingType::utString;
|
||||
std::get<Dictionary::ContainerPtrType<StringRef>>(attr.arrays).reset(new StringRef[cache_size]{});
|
||||
|
||||
while (true)
|
||||
|
@ -28,7 +28,7 @@ private:
|
||||
friend class COWHelper<IColumn, ConcreteColumn>;
|
||||
|
||||
int data;
|
||||
ConcreteColumn(int data) : data(data) {}
|
||||
ConcreteColumn(int data_) : data(data_) {}
|
||||
ConcreteColumn(const ConcreteColumn &) = default;
|
||||
|
||||
MutableColumnPtr test() const override
|
||||
|
@ -30,7 +30,7 @@ private:
|
||||
friend class COWHelper<IColumn, ConcreteColumn>;
|
||||
|
||||
int data;
|
||||
ConcreteColumn(int data) : data(data) {}
|
||||
ConcreteColumn(int data_) : data(data_) {}
|
||||
ConcreteColumn(const ConcreteColumn &) = default;
|
||||
|
||||
public:
|
||||
|
@ -58,8 +58,8 @@ struct Allocation
|
||||
|
||||
Allocation() {}
|
||||
|
||||
Allocation(size_t size)
|
||||
: size(size)
|
||||
Allocation(size_t size_)
|
||||
: size(size_)
|
||||
{
|
||||
ptr = malloc(size);
|
||||
if (!ptr)
|
||||
|
@ -18,8 +18,8 @@ extern const int UNKNOWN_CODEC;
|
||||
extern const int CORRUPTED_DATA;
|
||||
}
|
||||
|
||||
CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs)
|
||||
: codecs(codecs)
|
||||
CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs_)
|
||||
: codecs(codecs_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec
|
||||
{
|
||||
public:
|
||||
CompressionCodecMultiple() = default;
|
||||
explicit CompressionCodecMultiple(Codecs codecs);
|
||||
explicit CompressionCodecMultiple(Codecs codecs_);
|
||||
|
||||
UInt8 getMethodByte() const override;
|
||||
|
||||
|
@ -537,7 +537,6 @@ void decompress(
|
||||
if (source_size == 0 || dest_size == 0)
|
||||
return;
|
||||
|
||||
|
||||
/// Don't run timer if the block is too small.
|
||||
if (dest_size >= 32768)
|
||||
{
|
||||
|
@ -123,7 +123,7 @@ struct PerformanceStatistics
|
||||
}
|
||||
|
||||
PerformanceStatistics() {}
|
||||
PerformanceStatistics(ssize_t choose_method) : choose_method(choose_method) {}
|
||||
PerformanceStatistics(ssize_t choose_method_) : choose_method(choose_method_) {}
|
||||
};
|
||||
|
||||
|
||||
|
@ -334,10 +334,10 @@ auto SequentialGenerator = [](auto stride = 1)
|
||||
template <typename T>
|
||||
struct MonotonicGenerator
|
||||
{
|
||||
MonotonicGenerator(T stride = 1, size_t max_step = 10)
|
||||
MonotonicGenerator(T stride_ = 1, size_t max_step_ = 10)
|
||||
: prev_value(0),
|
||||
stride(stride),
|
||||
max_step(max_step)
|
||||
stride(stride_),
|
||||
max_step(max_step_)
|
||||
{}
|
||||
|
||||
template <typename U>
|
||||
@ -369,9 +369,9 @@ auto MinMaxGenerator = [](auto i)
|
||||
template <typename T>
|
||||
struct RandomGenerator
|
||||
{
|
||||
RandomGenerator(T seed = 0, T value_cap = std::numeric_limits<T>::max())
|
||||
RandomGenerator(T seed = 0, T value_cap_ = std::numeric_limits<T>::max())
|
||||
: e(seed),
|
||||
value_cap(value_cap)
|
||||
value_cap(value_cap_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -23,7 +23,7 @@ namespace DB
|
||||
class TaskNotification final : public Poco::Notification
|
||||
{
|
||||
public:
|
||||
explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task) : task(task) {}
|
||||
explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task_) : task(task_) {}
|
||||
void execute() { task->execute(); }
|
||||
|
||||
private:
|
||||
@ -155,8 +155,8 @@ Coordination::WatchCallback BackgroundSchedulePoolTaskInfo::getWatchCallback()
|
||||
}
|
||||
|
||||
|
||||
BackgroundSchedulePool::BackgroundSchedulePool(size_t size)
|
||||
: size(size)
|
||||
BackgroundSchedulePool::BackgroundSchedulePool(size_t size_)
|
||||
: size(size_)
|
||||
{
|
||||
LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads");
|
||||
|
||||
|
@ -49,7 +49,7 @@ public:
|
||||
|
||||
size_t getNumberOfThreads() const { return size; }
|
||||
|
||||
BackgroundSchedulePool(size_t size);
|
||||
BackgroundSchedulePool(size_t size_);
|
||||
~BackgroundSchedulePool();
|
||||
|
||||
private:
|
||||
|
@ -33,33 +33,33 @@ void ExternalResultDescription::init(const Block & sample_block_)
|
||||
const IDataType * type = type_not_nullable.get();
|
||||
|
||||
if (typeid_cast<const DataTypeUInt8 *>(type))
|
||||
types.emplace_back(ValueType::UInt8, is_nullable);
|
||||
types.emplace_back(ValueType::vtUInt8, is_nullable);
|
||||
else if (typeid_cast<const DataTypeUInt16 *>(type))
|
||||
types.emplace_back(ValueType::UInt16, is_nullable);
|
||||
types.emplace_back(ValueType::vtUInt16, is_nullable);
|
||||
else if (typeid_cast<const DataTypeUInt32 *>(type))
|
||||
types.emplace_back(ValueType::UInt32, is_nullable);
|
||||
types.emplace_back(ValueType::vtUInt32, is_nullable);
|
||||
else if (typeid_cast<const DataTypeUInt64 *>(type))
|
||||
types.emplace_back(ValueType::UInt64, is_nullable);
|
||||
types.emplace_back(ValueType::vtUInt64, is_nullable);
|
||||
else if (typeid_cast<const DataTypeInt8 *>(type))
|
||||
types.emplace_back(ValueType::Int8, is_nullable);
|
||||
types.emplace_back(ValueType::vtInt8, is_nullable);
|
||||
else if (typeid_cast<const DataTypeInt16 *>(type))
|
||||
types.emplace_back(ValueType::Int16, is_nullable);
|
||||
types.emplace_back(ValueType::vtInt16, is_nullable);
|
||||
else if (typeid_cast<const DataTypeInt32 *>(type))
|
||||
types.emplace_back(ValueType::Int32, is_nullable);
|
||||
types.emplace_back(ValueType::vtInt32, is_nullable);
|
||||
else if (typeid_cast<const DataTypeInt64 *>(type))
|
||||
types.emplace_back(ValueType::Int64, is_nullable);
|
||||
types.emplace_back(ValueType::vtInt64, is_nullable);
|
||||
else if (typeid_cast<const DataTypeFloat32 *>(type))
|
||||
types.emplace_back(ValueType::Float32, is_nullable);
|
||||
types.emplace_back(ValueType::vtFloat32, is_nullable);
|
||||
else if (typeid_cast<const DataTypeFloat64 *>(type))
|
||||
types.emplace_back(ValueType::Float64, is_nullable);
|
||||
types.emplace_back(ValueType::vtFloat64, is_nullable);
|
||||
else if (typeid_cast<const DataTypeString *>(type))
|
||||
types.emplace_back(ValueType::String, is_nullable);
|
||||
types.emplace_back(ValueType::vtString, is_nullable);
|
||||
else if (typeid_cast<const DataTypeDate *>(type))
|
||||
types.emplace_back(ValueType::Date, is_nullable);
|
||||
types.emplace_back(ValueType::vtDate, is_nullable);
|
||||
else if (typeid_cast<const DataTypeDateTime *>(type))
|
||||
types.emplace_back(ValueType::DateTime, is_nullable);
|
||||
types.emplace_back(ValueType::vtDateTime, is_nullable);
|
||||
else if (typeid_cast<const DataTypeUUID *>(type))
|
||||
types.emplace_back(ValueType::UUID, is_nullable);
|
||||
types.emplace_back(ValueType::vtUUID, is_nullable);
|
||||
else
|
||||
throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE};
|
||||
}
|
||||
|
@ -12,20 +12,20 @@ struct ExternalResultDescription
|
||||
{
|
||||
enum struct ValueType
|
||||
{
|
||||
UInt8,
|
||||
UInt16,
|
||||
UInt32,
|
||||
UInt64,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Float32,
|
||||
Float64,
|
||||
String,
|
||||
Date,
|
||||
DateTime,
|
||||
UUID,
|
||||
vtUInt8,
|
||||
vtUInt16,
|
||||
vtUInt32,
|
||||
vtUInt64,
|
||||
vtInt8,
|
||||
vtInt16,
|
||||
vtInt32,
|
||||
vtInt64,
|
||||
vtFloat32,
|
||||
vtFloat64,
|
||||
vtString,
|
||||
vtDate,
|
||||
vtDateTime,
|
||||
vtUUID,
|
||||
};
|
||||
|
||||
Block sample_block;
|
||||
|
@ -137,10 +137,10 @@ public:
|
||||
class PacketPayloadReadBuffer : public ReadBuffer
|
||||
{
|
||||
public:
|
||||
PacketPayloadReadBuffer(ReadBuffer & in, uint8_t & sequence_id)
|
||||
: ReadBuffer(in.position(), 0) // not in.buffer().begin(), because working buffer may include previous packet
|
||||
, in(in)
|
||||
, sequence_id(sequence_id)
|
||||
PacketPayloadReadBuffer(ReadBuffer & in_, uint8_t & sequence_id_)
|
||||
: ReadBuffer(in_.position(), 0) // not in.buffer().begin(), because working buffer may include previous packet
|
||||
, in(in_)
|
||||
, sequence_id(sequence_id_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -245,8 +245,8 @@ public:
|
||||
class PacketPayloadWriteBuffer : public WriteBuffer
|
||||
{
|
||||
public:
|
||||
PacketPayloadWriteBuffer(WriteBuffer & out, size_t payload_length, uint8_t & sequence_id)
|
||||
: WriteBuffer(out.position(), 0), out(out), sequence_id(sequence_id), total_left(payload_length)
|
||||
PacketPayloadWriteBuffer(WriteBuffer & out_, size_t payload_length_, uint8_t & sequence_id_)
|
||||
: WriteBuffer(out_.position(), 0), out(out_), sequence_id(sequence_id_), total_left(payload_length_)
|
||||
{
|
||||
startNewPacket();
|
||||
setWorkingBuffer();
|
||||
@ -347,18 +347,18 @@ public:
|
||||
size_t max_packet_size = MAX_PACKET_LENGTH;
|
||||
|
||||
/// For reading and writing.
|
||||
PacketSender(ReadBuffer & in, WriteBuffer & out, uint8_t & sequence_id)
|
||||
: sequence_id(sequence_id)
|
||||
, in(&in)
|
||||
, out(&out)
|
||||
PacketSender(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_)
|
||||
: sequence_id(sequence_id_)
|
||||
, in(&in_)
|
||||
, out(&out_)
|
||||
{
|
||||
}
|
||||
|
||||
/// For writing.
|
||||
PacketSender(WriteBuffer & out, uint8_t & sequence_id)
|
||||
: sequence_id(sequence_id)
|
||||
PacketSender(WriteBuffer & out_, uint8_t & sequence_id_)
|
||||
: sequence_id(sequence_id_)
|
||||
, in(nullptr)
|
||||
, out(&out)
|
||||
, out(&out_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -421,15 +421,15 @@ class Handshake : public WritePacket
|
||||
String auth_plugin_name;
|
||||
String auth_plugin_data;
|
||||
public:
|
||||
explicit Handshake(uint32_t capability_flags, uint32_t connection_id, String server_version, String auth_plugin_name, String auth_plugin_data)
|
||||
explicit Handshake(uint32_t capability_flags_, uint32_t connection_id_, String server_version_, String auth_plugin_name_, String auth_plugin_data_)
|
||||
: protocol_version(0xa)
|
||||
, server_version(std::move(server_version))
|
||||
, connection_id(connection_id)
|
||||
, capability_flags(capability_flags)
|
||||
, server_version(std::move(server_version_))
|
||||
, connection_id(connection_id_)
|
||||
, capability_flags(capability_flags_)
|
||||
, character_set(CharacterSet::utf8_general_ci)
|
||||
, status_flags(0)
|
||||
, auth_plugin_name(std::move(auth_plugin_name))
|
||||
, auth_plugin_data(std::move(auth_plugin_data))
|
||||
, auth_plugin_name(std::move(auth_plugin_name_))
|
||||
, auth_plugin_data(std::move(auth_plugin_data_))
|
||||
{
|
||||
}
|
||||
|
||||
@ -532,8 +532,8 @@ class AuthSwitchRequest : public WritePacket
|
||||
String plugin_name;
|
||||
String auth_plugin_data;
|
||||
public:
|
||||
AuthSwitchRequest(String plugin_name, String auth_plugin_data)
|
||||
: plugin_name(std::move(plugin_name)), auth_plugin_data(std::move(auth_plugin_data))
|
||||
AuthSwitchRequest(String plugin_name_, String auth_plugin_data_)
|
||||
: plugin_name(std::move(plugin_name_)), auth_plugin_data(std::move(auth_plugin_data_))
|
||||
{
|
||||
}
|
||||
|
||||
@ -566,7 +566,7 @@ class AuthMoreData : public WritePacket
|
||||
{
|
||||
String data;
|
||||
public:
|
||||
explicit AuthMoreData(String data): data(std::move(data)) {}
|
||||
explicit AuthMoreData(String data_): data(std::move(data_)) {}
|
||||
|
||||
protected:
|
||||
size_t getPayloadSize() const override
|
||||
@ -592,20 +592,20 @@ class OK_Packet : public WritePacket
|
||||
String session_state_changes;
|
||||
String info;
|
||||
public:
|
||||
OK_Packet(uint8_t header,
|
||||
uint32_t capabilities,
|
||||
uint64_t affected_rows,
|
||||
uint32_t status_flags,
|
||||
int16_t warnings,
|
||||
String session_state_changes = "",
|
||||
String info = "")
|
||||
: header(header)
|
||||
, capabilities(capabilities)
|
||||
, affected_rows(affected_rows)
|
||||
, warnings(warnings)
|
||||
, status_flags(status_flags)
|
||||
, session_state_changes(std::move(session_state_changes))
|
||||
, info(std::move(info))
|
||||
OK_Packet(uint8_t header_,
|
||||
uint32_t capabilities_,
|
||||
uint64_t affected_rows_,
|
||||
uint32_t status_flags_,
|
||||
int16_t warnings_,
|
||||
String session_state_changes_ = "",
|
||||
String info_ = "")
|
||||
: header(header_)
|
||||
, capabilities(capabilities_)
|
||||
, affected_rows(affected_rows_)
|
||||
, warnings(warnings_)
|
||||
, status_flags(status_flags_)
|
||||
, session_state_changes(std::move(session_state_changes_))
|
||||
, info(std::move(info_))
|
||||
{
|
||||
}
|
||||
|
||||
@ -671,7 +671,7 @@ class EOF_Packet : public WritePacket
|
||||
int warnings;
|
||||
int status_flags;
|
||||
public:
|
||||
EOF_Packet(int warnings, int status_flags) : warnings(warnings), status_flags(status_flags)
|
||||
EOF_Packet(int warnings_, int status_flags_) : warnings(warnings_), status_flags(status_flags_)
|
||||
{}
|
||||
|
||||
protected:
|
||||
@ -694,8 +694,8 @@ class ERR_Packet : public WritePacket
|
||||
String sql_state;
|
||||
String error_message;
|
||||
public:
|
||||
ERR_Packet(int error_code, String sql_state, String error_message)
|
||||
: error_code(error_code), sql_state(std::move(sql_state)), error_message(std::move(error_message))
|
||||
ERR_Packet(int error_code_, String sql_state_, String error_message_)
|
||||
: error_code(error_code_), sql_state(std::move(sql_state_)), error_message(std::move(error_message_))
|
||||
{
|
||||
}
|
||||
|
||||
@ -730,32 +730,32 @@ class ColumnDefinition : public WritePacket
|
||||
uint8_t decimals = 0x00;
|
||||
public:
|
||||
ColumnDefinition(
|
||||
String schema,
|
||||
String table,
|
||||
String org_table,
|
||||
String name,
|
||||
String org_name,
|
||||
uint16_t character_set,
|
||||
uint32_t column_length,
|
||||
ColumnType column_type,
|
||||
uint16_t flags,
|
||||
uint8_t decimals)
|
||||
String schema_,
|
||||
String table_,
|
||||
String org_table_,
|
||||
String name_,
|
||||
String org_name_,
|
||||
uint16_t character_set_,
|
||||
uint32_t column_length_,
|
||||
ColumnType column_type_,
|
||||
uint16_t flags_,
|
||||
uint8_t decimals_)
|
||||
|
||||
: schema(std::move(schema)), table(std::move(table)), org_table(std::move(org_table)), name(std::move(name)),
|
||||
org_name(std::move(org_name)), character_set(character_set), column_length(column_length), column_type(column_type), flags(flags),
|
||||
decimals(decimals)
|
||||
: schema(std::move(schema_)), table(std::move(table_)), org_table(std::move(org_table_)), name(std::move(name_)),
|
||||
org_name(std::move(org_name_)), character_set(character_set_), column_length(column_length_), column_type(column_type_), flags(flags_),
|
||||
decimals(decimals_)
|
||||
{
|
||||
}
|
||||
|
||||
/// Should be used when column metadata (original name, table, original table, database) is unknown.
|
||||
ColumnDefinition(
|
||||
String name,
|
||||
uint16_t character_set,
|
||||
uint32_t column_length,
|
||||
ColumnType column_type,
|
||||
uint16_t flags,
|
||||
uint8_t decimals)
|
||||
: ColumnDefinition("", "", "", std::move(name), "", character_set, column_length, column_type, flags, decimals)
|
||||
String name_,
|
||||
uint16_t character_set_,
|
||||
uint32_t column_length_,
|
||||
ColumnType column_type_,
|
||||
uint16_t flags_,
|
||||
uint8_t decimals_)
|
||||
: ColumnDefinition("", "", "", std::move(name_), "", character_set_, column_length_, column_type_, flags_, decimals_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -801,7 +801,7 @@ class LengthEncodedNumber : public WritePacket
|
||||
{
|
||||
uint64_t value;
|
||||
public:
|
||||
explicit LengthEncodedNumber(uint64_t value): value(value)
|
||||
explicit LengthEncodedNumber(uint64_t value_): value(value_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -26,16 +26,20 @@ void NamesAndTypesList::readText(ReadBuffer & buf)
size_t count;
DB::readText(count, buf);
assertString(" columns:\n", buf);
resize(count);
for (NameAndTypePair & it : *this)

String column_name;
String type_name;
for (size_t i = 0; i < count; ++i)
{
readBackQuotedStringWithSQLStyle(it.name, buf);
readBackQuotedStringWithSQLStyle(column_name, buf);
assertChar(' ', buf);
String type_name;
readString(type_name, buf);
it.type = data_type_factory.get(type_name);
assertChar('\n', buf);

emplace_back(column_name, data_type_factory.get(type_name));
}

assertEOF(buf);
}

void NamesAndTypesList::writeText(WriteBuffer & buf) const
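The reader above consumes the plain-text columns description: a count, the literal " columns:", then one back-quoted column name, a space, and a type name per line. A sample of the format it accepts (the column names and types here are illustrative):

3 columns:
`id` UInt64
`name` String
`created_at` DateTime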
@ -12,6 +12,41 @@ namespace DB
|
||||
|
||||
struct Null {};
|
||||
|
||||
enum class TypeIndex
|
||||
{
|
||||
Nothing = 0,
|
||||
UInt8,
|
||||
UInt16,
|
||||
UInt32,
|
||||
UInt64,
|
||||
UInt128,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Int128,
|
||||
Float32,
|
||||
Float64,
|
||||
Date,
|
||||
DateTime,
|
||||
String,
|
||||
FixedString,
|
||||
Enum8,
|
||||
Enum16,
|
||||
Decimal32,
|
||||
Decimal64,
|
||||
Decimal128,
|
||||
UUID,
|
||||
Array,
|
||||
Tuple,
|
||||
Set,
|
||||
Interval,
|
||||
Nullable,
|
||||
Function,
|
||||
AggregateFunction,
|
||||
LowCardinality,
|
||||
};
|
||||
|
||||
using UInt8 = uint8_t;
|
||||
using UInt16 = uint16_t;
|
||||
using UInt32 = uint32_t;
|
||||
@ -57,41 +92,6 @@ template <> struct TypeName<Float32> { static const char * get() { return "Float
|
||||
template <> struct TypeName<Float64> { static const char * get() { return "Float64"; } };
|
||||
template <> struct TypeName<String> { static const char * get() { return "String"; } };
|
||||
|
||||
enum class TypeIndex
|
||||
{
|
||||
Nothing = 0,
|
||||
UInt8,
|
||||
UInt16,
|
||||
UInt32,
|
||||
UInt64,
|
||||
UInt128,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Int128,
|
||||
Float32,
|
||||
Float64,
|
||||
Date,
|
||||
DateTime,
|
||||
String,
|
||||
FixedString,
|
||||
Enum8,
|
||||
Enum16,
|
||||
Decimal32,
|
||||
Decimal64,
|
||||
Decimal128,
|
||||
UUID,
|
||||
Array,
|
||||
Tuple,
|
||||
Set,
|
||||
Interval,
|
||||
Nullable,
|
||||
Function,
|
||||
AggregateFunction,
|
||||
LowCardinality,
|
||||
};
|
||||
|
||||
template <typename T> struct TypeId;
|
||||
template <> struct TypeId<UInt8> { static constexpr const TypeIndex value = TypeIndex::UInt8; };
|
||||
template <> struct TypeId<UInt16> { static constexpr const TypeIndex value = TypeIndex::UInt16; };
|
||||
|
@ -45,9 +45,9 @@ private:
|
||||
MergingBlock(const Block & block_,
|
||||
size_t stream_index_,
|
||||
const SortDescription & desc,
|
||||
const String & sign_column_name,
|
||||
BlockPlainPtrs * output_blocks)
|
||||
: block(block_), stream_index(stream_index_), output_blocks(output_blocks)
|
||||
const String & sign_column_name_,
|
||||
BlockPlainPtrs * output_blocks_)
|
||||
: block(block_), stream_index(stream_index_), output_blocks(output_blocks_)
|
||||
{
|
||||
sort_columns.resize(desc.size());
|
||||
for (size_t i = 0; i < desc.size(); ++i)
|
||||
@ -59,7 +59,7 @@ private:
|
||||
sort_columns[i] = block.safeGetByPosition(column_number).column.get();
|
||||
}
|
||||
|
||||
const IColumn * sign_icolumn = block.getByName(sign_column_name).column.get();
|
||||
const IColumn * sign_icolumn = block.getByName(sign_column_name_).column.get();
|
||||
|
||||
sign_column = typeid_cast<const ColumnInt8 *>(sign_icolumn);
|
||||
|
||||
|
@ -8,10 +8,10 @@ namespace ErrorCodes
|
||||
extern const int SET_SIZE_LIMIT_EXCEEDED;
|
||||
}
|
||||
|
||||
DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns)
|
||||
: columns_names(columns)
|
||||
DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_)
|
||||
: columns_names(columns_)
|
||||
, limit_hint(limit_hint_)
|
||||
, set_size_limits(set_size_limits)
|
||||
, set_size_limits(set_size_limits_)
|
||||
{
|
||||
children.push_back(input);
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ class DistinctBlockInputStream : public IBlockInputStream
|
||||
{
|
||||
public:
|
||||
/// Empty columns_ means all collumns.
|
||||
DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns);
|
||||
DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_);
|
||||
|
||||
String getName() const override { return "Distinct"; }
Some files were not shown because too many files have changed in this diff.