Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 23:52:03 +00:00)

Commit 428c753ed7: Merge with master
@@ -185,7 +185,7 @@

* Fixed "select_format" performance test for `Pretty` formats [#5642](https://github.com/yandex/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.9.4.1, 2019-07-05

## ClickHouse release 19.9.3.31, 2019-07-05

### Bug Fix

* Fix segfault in Delta codec which affects columns with values less than 32 bits size. The bug led to random memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin))

@@ -197,7 +197,7 @@

* Fix race condition, which cause that some queries may not appear in query_log instantly after SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ))
* Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov))

## ClickHouse release 19.7.6.1, 2019-07-05

## ClickHouse release 19.7.5.29, 2019-07-05

### Bug Fix

* Fix performance regression in some queries with JOIN. [#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014))
@@ -437,10 +437,10 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE

include(GNUInstallDirs)
include (cmake/find_contrib_lib.cmake)
include (cmake/lib_name.cmake)

find_contrib_lib(double-conversion) # Must be before parquet
include (cmake/find_ssl.cmake)
include (cmake/lib_name.cmake)
include (cmake/find_icu.cmake)
include (cmake/find_boost.cmake)
include (cmake/find_zlib.cmake)
@@ -1,4 +1,6 @@

option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED})
if (NOT ARCH_ARM AND NOT OS_FREEBSD)
option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${NOT_UNBUNDLED})
endif ()

if (ENABLE_FASTOPS)
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h")

@@ -12,4 +14,4 @@ else ()
set(USE_FASTOPS 0)
endif ()

message (STATUS "Using fastops")
message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}")
contrib/poco (vendored submodule)
@@ -1 +1 @@
Subproject commit ea2516be366a73a02a82b499ed4a7db1d40037e0
Subproject commit 7a2d304c21549427460428c9039009ef4bbfd899
@@ -48,7 +48,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow -Wshadow-uncaptured-local -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override -Wunused-exception-parameter -Wcovered-switch-default -Wold-style-cast -Wrange-loop-analysis -Wunused-member-function -Wunreachable-code -Wunreachable-code-return -Wnewline-eof -Wembedded-directive -Wgnu-case-range -Wunused-macros -Wconditional-uninitialized -Wdeprecated -Wundef -Wreserved-id-macro -Wredundant-parens -Wzero-as-null-pointer-constant")

if (WEVERYTHING)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed")

# TODO Enable conversion, sign-conversion, double-promotion warnings.
endif ()

@@ -71,7 +71,9 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")

set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-ctad-maybe-unsupported")
endif ()
endif ()
endif ()
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow")
endif()

if (USE_DEBUG_HELPERS)
set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h")
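Dropping `-Wno-shadow-field-in-constructor` from the `-Weverything` suppression list makes Clang warn whenever a constructor parameter shadows a data member, which is presumably why constructor parameters throughout the rest of this diff gain a trailing underscore (`seed` becomes `seed_`, `owner` becomes `owner_`, and so on). A minimal sketch of the failure mode the warning guards against, using illustrative names rather than actual ClickHouse types:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical stand-in for a parameters struct such as MarkovModelParameters.
struct Params
{
    std::vector<int> centers;
};

struct ModelBad
{
    Params params;
    std::vector<int> code_points;

    // The parameter "params" shadows the member "params". In the mem-initializer list the
    // unqualified name refers to the parameter, so "params.centers" below reads the
    // moved-from parameter (in practice an empty vector), not the just-initialized member.
    ModelBad(Params params)
        : params(std::move(params)), code_points(params.centers.size(), 0) {}
};

struct ModelGood
{
    Params params;
    std::vector<int> code_points;

    // With the trailing underscore nothing is shadowed: "params.centers" unambiguously
    // names the member, which was initialized just before in the same list.
    ModelGood(Params params_)
        : params(std::move(params_)), code_points(params.centers.size(), 0) {}
};

int main()
{
    ModelGood good(Params{{1, 2, 3}});
    // ModelBad would typically end up with an empty code_points here.
    return good.code_points.size() == 3 ? 0 : 1;
}
```

The renames in this diff follow that pattern mechanically, so member initializer lists such as `seed(seed_)` can never accidentally read a constructor argument that has already been moved from.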
@@ -711,7 +711,7 @@ private:

if (ignore_error)
{
    Tokens tokens(begin, end);
    TokenIterator token_iterator(tokens);
    IParser::Pos token_iterator(tokens);
    while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid())
        ++token_iterator;
    begin = token_iterator->end;
@@ -123,7 +123,7 @@ enum class TaskState

struct TaskStateWithOwner
{
    TaskStateWithOwner() = default;
    TaskStateWithOwner(TaskState state, const String & owner) : state(state), owner(owner) {}
    TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {}

    TaskState state{TaskState::Unknown};
    String owner;
@@ -2100,9 +2100,9 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self)

// process_id is '<hostname>#<start_timestamp>_<pid>'
time_t timestamp = Poco::Timestamp().epochTime();
auto pid = Poco::Process::id();
auto curr_pid = Poco::Process::id();

process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(pid);
process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid);
host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id;
process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString();
Poco::File(process_path).createDirectories();
@@ -176,7 +176,7 @@ private:
    const UInt64 seed;

public:
    UnsignedIntegerModel(UInt64 seed) : seed(seed) {}
    UnsignedIntegerModel(UInt64 seed_) : seed(seed_) {}

    void train(const IColumn &) override {}
    void finalize() override {}

@@ -212,7 +212,7 @@ private:
    const UInt64 seed;

public:
    SignedIntegerModel(UInt64 seed) : seed(seed) {}
    SignedIntegerModel(UInt64 seed_) : seed(seed_) {}

    void train(const IColumn &) override {}
    void finalize() override {}

@@ -256,7 +256,7 @@ private:
    Float res_prev_value = 0;

public:
    FloatModel(UInt64 seed) : seed(seed) {}
    FloatModel(UInt64 seed_) : seed(seed_) {}

    void train(const IColumn &) override {}
    void finalize() override {}

@@ -348,7 +348,7 @@ private:
    const UInt64 seed;

public:
    FixedStringModel(UInt64 seed) : seed(seed) {}
    FixedStringModel(UInt64 seed_) : seed(seed_) {}

    void train(const IColumn &) override {}
    void finalize() override {}

@@ -385,7 +385,7 @@ private:
    const DateLUTImpl & date_lut;

public:
    DateTimeModel(UInt64 seed) : seed(seed), date_lut(DateLUT::instance()) {}
    DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {}

    void train(const IColumn &) override {}
    void finalize() override {}

@@ -533,8 +533,8 @@ private:
    }

public:
    MarkovModel(MarkovModelParameters params)
        : params(std::move(params)), code_points(params.order, BEGIN) {}
    MarkovModel(MarkovModelParameters params_)
        : params(std::move(params_)), code_points(params.order, BEGIN) {}

    void consume(const char * data, size_t size)
    {

@@ -745,7 +745,7 @@ private:
    MarkovModel markov_model;

public:
    StringModel(UInt64 seed, MarkovModelParameters params) : seed(seed), markov_model(std::move(params)) {}
    StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(std::move(params_)) {}

    void train(const IColumn & column) override
    {

@@ -797,7 +797,7 @@ private:
    ModelPtr nested_model;

public:
    ArrayModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {}
    ArrayModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {}

    void train(const IColumn & column) override
    {

@@ -830,7 +830,7 @@ private:
    ModelPtr nested_model;

public:
    NullableModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {}
    NullableModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {}

    void train(const IColumn & column) override
    {
@@ -18,12 +18,12 @@ namespace ErrorCodes

ODBCBlockInputStream::ODBCBlockInputStream(
    Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size)
    : session{session}
    Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_)
    : session{session_}
    , statement{(this->session << query_str, Poco::Data::Keywords::now)}
    , result{statement}
    , iterator{result.begin()}
    , max_block_size{max_block_size}
    , max_block_size{max_block_size_}
    , log(&Logger::get("ODBCBlockInputStream"))
{
    if (sample_block.columns() != result.columnCount())
@ -43,46 +43,46 @@ namespace
|
||||
{
|
||||
switch (type)
|
||||
{
|
||||
case ValueType::UInt8:
|
||||
case ValueType::vtUInt8:
|
||||
static_cast<ColumnUInt8 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt16:
|
||||
case ValueType::vtUInt16:
|
||||
static_cast<ColumnUInt16 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt32:
|
||||
case ValueType::vtUInt32:
|
||||
static_cast<ColumnUInt32 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::UInt64:
|
||||
case ValueType::vtUInt64:
|
||||
static_cast<ColumnUInt64 &>(column).insertValue(value.convert<UInt64>());
|
||||
break;
|
||||
case ValueType::Int8:
|
||||
case ValueType::vtInt8:
|
||||
static_cast<ColumnInt8 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int16:
|
||||
case ValueType::vtInt16:
|
||||
static_cast<ColumnInt16 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int32:
|
||||
case ValueType::vtInt32:
|
||||
static_cast<ColumnInt32 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Int64:
|
||||
case ValueType::vtInt64:
|
||||
static_cast<ColumnInt64 &>(column).insertValue(value.convert<Int64>());
|
||||
break;
|
||||
case ValueType::Float32:
|
||||
case ValueType::vtFloat32:
|
||||
static_cast<ColumnFloat32 &>(column).insertValue(value.convert<Float64>());
|
||||
break;
|
||||
case ValueType::Float64:
|
||||
case ValueType::vtFloat64:
|
||||
static_cast<ColumnFloat64 &>(column).insertValue(value.convert<Float64>());
|
||||
break;
|
||||
case ValueType::String:
|
||||
case ValueType::vtString:
|
||||
static_cast<ColumnString &>(column).insert(value.convert<String>());
|
||||
break;
|
||||
case ValueType::Date:
|
||||
case ValueType::vtDate:
|
||||
static_cast<ColumnUInt16 &>(column).insertValue(UInt16{LocalDate{value.convert<String>()}.getDayNum()});
|
||||
break;
|
||||
case ValueType::DateTime:
|
||||
case ValueType::vtDateTime:
|
||||
static_cast<ColumnUInt32 &>(column).insertValue(time_t{LocalDateTime{value.convert<String>()}});
|
||||
break;
|
||||
case ValueType::UUID:
|
||||
case ValueType::vtUUID:
|
||||
static_cast<ColumnUInt128 &>(column).insert(parse<UUID>(value.convert<std::string>()));
|
||||
break;
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ class ODBCBlockInputStream final : public IBlockInputStream
|
||||
{
|
||||
public:
|
||||
ODBCBlockInputStream(
|
||||
Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size);
|
||||
Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_);
|
||||
|
||||
String getName() const override { return "ODBC"; }
|
||||
|
||||
|
@ -324,7 +324,6 @@ try
|
||||
using po::value;
|
||||
using Strings = DB::Strings;
|
||||
|
||||
|
||||
po::options_description desc("Allowed options");
|
||||
desc.add_options()
|
||||
("help", "produce help message")
|
||||
|
@ -16,8 +16,8 @@ namespace DB
|
||||
{
|
||||
|
||||
MetricsTransmitter::MetricsTransmitter(
|
||||
const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics)
|
||||
: async_metrics(async_metrics), config_name(config_name)
|
||||
const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_)
|
||||
: async_metrics(async_metrics_), config_name(config_name_)
|
||||
{
|
||||
interval_seconds = config.getInt(config_name + ".interval", 60);
|
||||
send_events = config.getBool(config_name + ".events", true);
|
||||
|
@ -32,7 +32,7 @@ class AsynchronousMetrics;
|
||||
class MetricsTransmitter
|
||||
{
|
||||
public:
|
||||
MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics);
|
||||
MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_);
|
||||
~MetricsTransmitter();
|
||||
|
||||
private:
|
||||
|
@ -37,14 +37,14 @@ namespace ErrorCodes
|
||||
extern const int OPENSSL_ERROR;
|
||||
}
|
||||
|
||||
MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id)
|
||||
MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_)
|
||||
: Poco::Net::TCPServerConnection(socket_)
|
||||
, server(server_)
|
||||
, log(&Poco::Logger::get("MySQLHandler"))
|
||||
, connection_context(server.context())
|
||||
, connection_id(connection_id)
|
||||
, public_key(public_key)
|
||||
, private_key(private_key)
|
||||
, connection_id(connection_id_)
|
||||
, public_key(public_key_)
|
||||
, private_key(private_key_)
|
||||
{
|
||||
server_capability_flags = CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | CLIENT_CONNECT_WITH_DB | CLIENT_DEPRECATE_EOF;
|
||||
if (ssl_enabled)
|
||||
|
@ -14,7 +14,7 @@ namespace DB
|
||||
class MySQLHandler : public Poco::Net::TCPServerConnection
|
||||
{
|
||||
public:
|
||||
MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id);
|
||||
MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_);
|
||||
|
||||
void run() final;
|
||||
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <Common/getFQDNOrHostName.h>
|
||||
#include <Common/getMultipleKeysFromConfig.h>
|
||||
#include <Common/getNumberOfPhysicalCPUCores.h>
|
||||
#include <Common/getExecutablePath.h>
|
||||
#include <Common/TaskStatsInfoGetter.h>
|
||||
#include <Common/ThreadStatus.h>
|
||||
#include <IO/HTTPCommon.h>
|
||||
@ -156,19 +157,19 @@ std::string Server::getDefaultCorePath() const
|
||||
return getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)) + "cores";
|
||||
}
|
||||
|
||||
void Server::defineOptions(Poco::Util::OptionSet & _options)
|
||||
void Server::defineOptions(Poco::Util::OptionSet & options)
|
||||
{
|
||||
_options.addOption(
|
||||
options.addOption(
|
||||
Poco::Util::Option("help", "h", "show help and exit")
|
||||
.required(false)
|
||||
.repeatable(false)
|
||||
.binding("help"));
|
||||
_options.addOption(
|
||||
options.addOption(
|
||||
Poco::Util::Option("version", "V", "show version and exit")
|
||||
.required(false)
|
||||
.repeatable(false)
|
||||
.binding("version"));
|
||||
BaseDaemon::defineOptions(_options);
|
||||
BaseDaemon::defineOptions(options);
|
||||
}
|
||||
|
||||
int Server::main(const std::vector<std::string> & /*args*/)
|
||||
@ -212,6 +213,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
const auto memory_amount = getMemoryAmount();
|
||||
|
||||
#if defined(__linux__)
|
||||
std::string executable_path = getExecutablePath();
|
||||
if (executable_path.empty())
|
||||
executable_path = "/usr/bin/clickhouse"; /// It is used for information messages.
|
||||
|
||||
/// After full config loaded
|
||||
{
|
||||
if (config().getBool("mlock_executable", false))
|
||||
@ -228,7 +233,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_IPC_LOCK capability, binary mlock will be disabled."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_ipc_lock=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems.");
|
||||
}
|
||||
}
|
||||
@ -547,7 +552,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, 'taskstats' performance statistics will be disabled."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems."
|
||||
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.");
|
||||
}
|
||||
@ -556,7 +561,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
{
|
||||
LOG_INFO(log, "It looks like the process has no CAP_SYS_NICE capability, the setting 'os_thread_nice' will have no effect."
|
||||
" It could happen due to incorrect ClickHouse package installation."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep /usr/bin/clickhouse'."
|
||||
" You could resolve the problem manually with 'sudo setcap cap_sys_nice=+ep " << executable_path << "'."
|
||||
" Note that it will not work on 'nosuid' mounted filesystems.");
|
||||
}
|
||||
#else
|
||||
|
@ -35,8 +35,8 @@ private:
|
||||
const DataTypePtr & type_val;
|
||||
|
||||
public:
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res, const DataTypePtr & type_val)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data, AllocatesMemoryInArena>>({type_res, type_val}, {}),
|
||||
AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionArgMinMax<Data, AllocatesMemoryInArena>>({type_res_, type_val_}, {}),
|
||||
type_res(this->argument_types[0]), type_val(this->argument_types[1])
|
||||
{
|
||||
if (!type_val->isComparable())
|
||||
|
@ -62,12 +62,6 @@ public:
|
||||
static_cast<ColumnUInt64 &>(to).getData().push_back(data(place).count);
|
||||
}
|
||||
|
||||
/// May be used for optimization.
|
||||
void addDelta(AggregateDataPtr place, UInt64 x) const
|
||||
{
|
||||
data(place).count += x;
|
||||
}
|
||||
|
||||
const char * getHeaderFilePath() const override { return __FILE__; }
|
||||
};
|
||||
|
||||
|
@ -253,8 +253,8 @@ class GroupArrayGeneralListImpl final
|
||||
UInt64 max_elems;
|
||||
|
||||
public:
|
||||
GroupArrayGeneralListImpl(const DataTypePtr & data_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, limit_num_elems>>({data_type}, {})
|
||||
GroupArrayGeneralListImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<GroupArrayGeneralListData<Node>, GroupArrayGeneralListImpl<Node, limit_num_elems>>({data_type_}, {})
|
||||
, data_type(this->argument_types[0]), max_elems(max_elems_) {}
|
||||
|
||||
String getName() const override { return "groupArray"; }
|
||||
|
@ -164,8 +164,8 @@ class AggregateFunctionGroupUniqArrayGeneric
|
||||
}
|
||||
|
||||
public:
|
||||
AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type}, {})
|
||||
AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, UInt64 max_elems_ = std::numeric_limits<UInt64>::max())
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionGroupUniqArrayGenericData, AggregateFunctionGroupUniqArrayGeneric<is_plain_column, Tlimit_num_elem>>({input_data_type_}, {})
|
||||
, input_data_type(this->argument_types[0])
|
||||
, max_elems(max_elems_) {}
|
||||
|
||||
|
@ -304,9 +304,9 @@ private:
|
||||
const UInt32 max_bins;
|
||||
|
||||
public:
|
||||
AggregateFunctionHistogram(UInt32 max_bins, const DataTypes & arguments, const Array & params)
|
||||
AggregateFunctionHistogram(UInt32 max_bins_, const DataTypes & arguments, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionHistogramData, AggregateFunctionHistogram<T>>(arguments, params)
|
||||
, max_bins(max_bins)
|
||||
, max_bins(max_bins_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -104,21 +104,21 @@ void registerAggregateFunctionMLMethod(AggregateFunctionFactory & factory)
|
||||
}
|
||||
|
||||
LinearModelData::LinearModelData(
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 param_num,
|
||||
UInt64 batch_capacity,
|
||||
std::shared_ptr<DB::IGradientComputer> gradient_computer,
|
||||
std::shared_ptr<DB::IWeightsUpdater> weights_updater)
|
||||
: learning_rate(learning_rate)
|
||||
, l2_reg_coef(l2_reg_coef)
|
||||
, batch_capacity(batch_capacity)
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 param_num_,
|
||||
UInt64 batch_capacity_,
|
||||
std::shared_ptr<DB::IGradientComputer> gradient_computer_,
|
||||
std::shared_ptr<DB::IWeightsUpdater> weights_updater_)
|
||||
: learning_rate(learning_rate_)
|
||||
, l2_reg_coef(l2_reg_coef_)
|
||||
, batch_capacity(batch_capacity_)
|
||||
, batch_size(0)
|
||||
, gradient_computer(std::move(gradient_computer))
|
||||
, weights_updater(std::move(weights_updater))
|
||||
, gradient_computer(std::move(gradient_computer_))
|
||||
, weights_updater(std::move(weights_updater_))
|
||||
{
|
||||
weights.resize(param_num, Float64{0.0});
|
||||
gradient_batch.resize(param_num + 1, Float64{0.0});
|
||||
weights.resize(param_num_, Float64{0.0});
|
||||
gradient_batch.resize(param_num_ + 1, Float64{0.0});
|
||||
}
|
||||
|
||||
void LinearModelData::update_state()
|
||||
|
@ -248,12 +248,12 @@ public:
|
||||
LinearModelData() {}
|
||||
|
||||
LinearModelData(
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 param_num,
|
||||
UInt64 batch_capacity,
|
||||
std::shared_ptr<IGradientComputer> gradient_computer,
|
||||
std::shared_ptr<IWeightsUpdater> weights_updater);
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 param_num_,
|
||||
UInt64 batch_capacity_,
|
||||
std::shared_ptr<IGradientComputer> gradient_computer_,
|
||||
std::shared_ptr<IWeightsUpdater> weights_updater_);
|
||||
|
||||
void add(const IColumn ** columns, size_t row_num);
|
||||
|
||||
@ -304,21 +304,21 @@ public:
|
||||
String getName() const override { return Name::name; }
|
||||
|
||||
explicit AggregateFunctionMLMethod(
|
||||
UInt32 param_num,
|
||||
std::unique_ptr<IGradientComputer> gradient_computer,
|
||||
std::string weights_updater_name,
|
||||
Float64 learning_rate,
|
||||
Float64 l2_reg_coef,
|
||||
UInt64 batch_size,
|
||||
UInt32 param_num_,
|
||||
std::unique_ptr<IGradientComputer> gradient_computer_,
|
||||
std::string weights_updater_name_,
|
||||
Float64 learning_rate_,
|
||||
Float64 l2_reg_coef_,
|
||||
UInt64 batch_size_,
|
||||
const DataTypes & arguments_types,
|
||||
const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionMLMethod<Data, Name>>(arguments_types, params)
|
||||
, param_num(param_num)
|
||||
, learning_rate(learning_rate)
|
||||
, l2_reg_coef(l2_reg_coef)
|
||||
, batch_size(batch_size)
|
||||
, gradient_computer(std::move(gradient_computer))
|
||||
, weights_updater_name(std::move(weights_updater_name))
|
||||
, param_num(param_num_)
|
||||
, learning_rate(learning_rate_)
|
||||
, l2_reg_coef(l2_reg_coef_)
|
||||
, batch_size(batch_size_)
|
||||
, gradient_computer(std::move(gradient_computer_))
|
||||
, weights_updater_name(std::move(weights_updater_name_))
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -679,8 +679,8 @@ private:
|
||||
DataTypePtr & type;
|
||||
|
||||
public:
|
||||
AggregateFunctionsSingleValue(const DataTypePtr & type)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionsSingleValue<Data, AllocatesMemoryInArena>>({type}, {})
|
||||
AggregateFunctionsSingleValue(const DataTypePtr & type_)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionsSingleValue<Data, AllocatesMemoryInArena>>({type_}, {})
|
||||
, type(this->argument_types[0])
|
||||
{
|
||||
if (StringRef(Data::name()) == StringRef("min")
|
||||
|
@ -76,8 +76,8 @@ private:
|
||||
DataTypePtr & argument_type;
|
||||
|
||||
public:
|
||||
AggregateFunctionQuantile(const DataTypePtr & argument_type, const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionQuantile<Value, Data, Name, has_second_arg, FloatReturnType, returns_many>>({argument_type}, params)
|
||||
AggregateFunctionQuantile(const DataTypePtr & argument_type_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<Data, AggregateFunctionQuantile<Value, Data, Name, has_second_arg, FloatReturnType, returns_many>>({argument_type_}, params)
|
||||
, levels(params, returns_many), level(levels.levels[0]), argument_type(this->argument_types[0])
|
||||
{
|
||||
if (!returns_many && levels.size() > 1)
|
||||
|
@ -33,18 +33,18 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionResample(
|
||||
AggregateFunctionPtr nested_function,
|
||||
Key begin,
|
||||
Key end,
|
||||
size_t step,
|
||||
AggregateFunctionPtr nested_function_,
|
||||
Key begin_,
|
||||
Key end_,
|
||||
size_t step_,
|
||||
const DataTypes & arguments,
|
||||
const Array & params)
|
||||
: IAggregateFunctionHelper<AggregateFunctionResample<Key>>{arguments, params}
|
||||
, nested_function{nested_function}
|
||||
, nested_function{nested_function_}
|
||||
, last_col{arguments.size() - 1}
|
||||
, begin{begin}
|
||||
, end{end}
|
||||
, step{step}
|
||||
, begin{begin_}
|
||||
, end{end_}
|
||||
, step{step_}
|
||||
, total{0}
|
||||
, aod{nested_function->alignOfData()}
|
||||
, sod{(nested_function->sizeOfData() + aod - 1) / aod * aod}
|
||||
|
@ -142,9 +142,9 @@ template <typename T, typename Data, typename Derived>
|
||||
class AggregateFunctionSequenceBase : public IAggregateFunctionDataHelper<Data, Derived>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: IAggregateFunctionDataHelper<Data, Derived>(arguments, params)
|
||||
, pattern(pattern)
|
||||
, pattern(pattern_)
|
||||
{
|
||||
arg_count = arguments.size();
|
||||
parsePattern();
|
||||
@ -199,7 +199,7 @@ private:
|
||||
std::uint64_t extra;
|
||||
|
||||
PatternAction() = default;
|
||||
PatternAction(const PatternActionType type, const std::uint64_t extra = 0) : type{type}, extra{extra} {}
|
||||
PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {}
|
||||
};
|
||||
|
||||
using PatternActions = PODArrayWithStackMemory<PatternAction, 64>;
|
||||
@ -520,8 +520,8 @@ private:
|
||||
|
||||
struct DFAState
|
||||
{
|
||||
DFAState(bool has_kleene = false)
|
||||
: has_kleene{has_kleene}, event{0}, transition{DFATransition::None}
|
||||
DFAState(bool has_kleene_ = false)
|
||||
: has_kleene{has_kleene_}, event{0}, transition{DFATransition::None}
|
||||
{}
|
||||
|
||||
/// .-------.
|
||||
@ -554,8 +554,8 @@ template <typename T, typename Data>
|
||||
class AggregateFunctionSequenceMatch final : public AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>(arguments, params, pattern) {}
|
||||
AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>(arguments, params, pattern_) {}
|
||||
|
||||
using AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceMatch<T, Data>>::AggregateFunctionSequenceBase;
|
||||
|
||||
@ -582,8 +582,8 @@ template <typename T, typename Data>
|
||||
class AggregateFunctionSequenceCount final : public AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>
|
||||
{
|
||||
public:
|
||||
AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>(arguments, params, pattern) {}
|
||||
AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern_)
|
||||
: AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>(arguments, params, pattern_) {}
|
||||
|
||||
using AggregateFunctionSequenceBase<T, Data, AggregateFunctionSequenceCount<T, Data>>::AggregateFunctionSequenceBase;
|
||||
|
||||
|
@ -23,9 +23,9 @@ private:
|
||||
Array params;
|
||||
|
||||
public:
|
||||
AggregateFunctionState(AggregateFunctionPtr nested, const DataTypes & arguments, const Array & params)
|
||||
: IAggregateFunctionHelper<AggregateFunctionState>(arguments, params)
|
||||
, nested_func(nested), arguments(arguments), params(params) {}
|
||||
AggregateFunctionState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_)
|
||||
: IAggregateFunctionHelper<AggregateFunctionState>(arguments_, params_)
|
||||
, nested_func(nested_), arguments(arguments_), params(params_) {}
|
||||
|
||||
String getName() const override
|
||||
{
|
||||
|
@ -62,10 +62,10 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionSumMapBase(
|
||||
const DataTypePtr & keys_type, const DataTypes & values_types,
|
||||
const DataTypePtr & keys_type_, const DataTypes & values_types_,
|
||||
const DataTypes & argument_types_, const Array & params_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionSumMapData<NearestFieldType<T>>, Derived>(argument_types_, params_)
|
||||
, keys_type(keys_type), values_types(values_types) {}
|
||||
, keys_type(keys_type_), values_types(values_types_) {}
|
||||
|
||||
String getName() const override { return "sumMap"; }
|
||||
|
||||
@ -295,9 +295,9 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionSumMapFiltered(
|
||||
const DataTypePtr & keys_type, const DataTypes & values_types, const Array & keys_to_keep_,
|
||||
const DataTypePtr & keys_type_, const DataTypes & values_types_, const Array & keys_to_keep_,
|
||||
const DataTypes & argument_types_, const Array & params_)
|
||||
: Base{keys_type, values_types, argument_types_, params_}
|
||||
: Base{keys_type_, values_types_, argument_types_, params_}
|
||||
{
|
||||
keys_to_keep.reserve(keys_to_keep_.size());
|
||||
for (const Field & f : keys_to_keep_)
|
||||
|
@ -44,9 +44,9 @@ protected:
|
||||
UInt64 reserved;
|
||||
|
||||
public:
|
||||
AggregateFunctionTopK(UInt64 threshold, UInt64 load_factor, const DataTypes & argument_types_, const Array & params)
|
||||
AggregateFunctionTopK(UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKData<T>, AggregateFunctionTopK<T, is_weighted>>(argument_types_, params)
|
||||
, threshold(threshold), reserved(load_factor * threshold) {}
|
||||
, threshold(threshold_), reserved(load_factor * threshold) {}
|
||||
|
||||
String getName() const override { return is_weighted ? "topKWeighted" : "topK"; }
|
||||
|
||||
@ -139,9 +139,9 @@ private:
|
||||
|
||||
public:
|
||||
AggregateFunctionTopKGeneric(
|
||||
UInt64 threshold, UInt64 load_factor, const DataTypePtr & input_data_type, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKGenericData, AggregateFunctionTopKGeneric<is_plain_column, is_weighted>>({input_data_type}, params)
|
||||
, threshold(threshold), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {}
|
||||
UInt64 threshold_, UInt64 load_factor, const DataTypePtr & input_data_type_, const Array & params)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionTopKGenericData, AggregateFunctionTopKGeneric<is_plain_column, is_weighted>>({input_data_type_}, params)
|
||||
, threshold(threshold_), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {}
|
||||
|
||||
String getName() const override { return is_weighted ? "topKWeighted" : "topK"; }
|
||||
|
||||
|
@ -136,9 +136,9 @@ private:
|
||||
UInt8 threshold;
|
||||
|
||||
public:
|
||||
AggregateFunctionUniqUpTo(UInt8 threshold, const DataTypes & argument_types_, const Array & params_)
|
||||
AggregateFunctionUniqUpTo(UInt8 threshold_, const DataTypes & argument_types_, const Array & params_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionUniqUpToData<T>, AggregateFunctionUniqUpTo<T>>(argument_types_, params_)
|
||||
, threshold(threshold)
|
||||
, threshold(threshold_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -196,9 +196,9 @@ private:
|
||||
UInt8 threshold;
|
||||
|
||||
public:
|
||||
AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold)
|
||||
AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold_)
|
||||
: IAggregateFunctionDataHelper<AggregateFunctionUniqUpToData<UInt64>, AggregateFunctionUniqUpToVariadic<is_exact, argument_is_tuple>>(arguments, params)
|
||||
, threshold(threshold)
|
||||
, threshold(threshold_)
|
||||
{
|
||||
if (argument_is_tuple)
|
||||
num_args = typeid_cast<const DataTypeTuple &>(*arguments[0]).getElements().size();
|
||||
|
@@ -128,6 +128,15 @@

    using AddFunc = void (*)(const IAggregateFunction *, AggregateDataPtr, const IColumn **, size_t, Arena *);
    virtual AddFunc getAddressOfAddFunction() const = 0;

    /** Contains a loop with calls to "add" function. You can collect arguments into array "places"
      * and do a single call to "addBatch" for devirtualization and inlining.
      */
    virtual void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const = 0;

    /** The same for single place.
      */
    virtual void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const = 0;

    /** This is used for runtime code generation to determine, which header files to include in generated source.
      * Always implement it as
      * const char * getHeaderFilePath() const override { return __FILE__; }

@@ -156,7 +165,20 @@ private:

public:
    IAggregateFunctionHelper(const DataTypes & argument_types_, const Array & parameters_)
        : IAggregateFunction(argument_types_, parameters_) {}

    AddFunc getAddressOfAddFunction() const override { return &addFree; }

    void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, Arena * arena) const override
    {
        for (size_t i = 0; i < batch_size; ++i)
            static_cast<const Derived *>(this)->add(places[i] + place_offset, columns, i, arena);
    }

    void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const override
    {
        for (size_t i = 0; i < batch_size; ++i)
            static_cast<const Derived *>(this)->add(place, columns, i, arena);
    }
};
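The new `addBatch` / `addBatchSinglePlace` entry points replace one virtual `add` call per row with a single virtual call per block; inside the CRTP helper the `static_cast<const Derived *>` call can be devirtualized and inlined when the concrete function class is final. A reduced, self-contained sketch of the same pattern, using simplified stand-in types rather than the real `IColumn`/`Arena` interfaces:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using AggregateDataPtr = char *;  // stand-in for ClickHouse's AggregateDataPtr

struct IAggregateFunction
{
    virtual ~IAggregateFunction() = default;
    virtual void add(AggregateDataPtr place, const int64_t * column, size_t row) const = 0;

    /// One virtual call per block instead of one per row.
    virtual void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset,
                          const int64_t * column) const = 0;
};

template <typename Derived>
struct IAggregateFunctionHelper : IAggregateFunction
{
    void addBatch(size_t batch_size, AggregateDataPtr * places, size_t place_offset,
                  const int64_t * column) const override
    {
        /// Derived is final, so the compiler can devirtualize and inline this add() call.
        for (size_t i = 0; i < batch_size; ++i)
            static_cast<const Derived *>(this)->add(places[i] + place_offset, column, i);
    }
};

struct SumFunction final : IAggregateFunctionHelper<SumFunction>
{
    void add(AggregateDataPtr place, const int64_t * column, size_t row) const override
    {
        *reinterpret_cast<int64_t *>(place) += column[row];
    }
};

int main()
{
    std::vector<int64_t> column{1, 2, 3, 4};
    int64_t state = 0;

    /// In real use each entry of "places" points at the aggregation state of the row's group;
    /// here every row lands in the same state.
    std::vector<AggregateDataPtr> places(column.size(), reinterpret_cast<AggregateDataPtr>(&state));

    SumFunction sum;
    sum.addBatch(column.size(), places.data(), /*place_offset=*/0, column.data());

    std::cout << state << '\n';  // prints 10
}
```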
@ -50,9 +50,9 @@ class QuantileTDigest
|
||||
|
||||
Centroid() = default;
|
||||
|
||||
explicit Centroid(Value mean, Count count)
|
||||
: mean(mean)
|
||||
, count(count)
|
||||
explicit Centroid(Value mean_, Count count_)
|
||||
: mean(mean_)
|
||||
, count(count_)
|
||||
{}
|
||||
|
||||
Centroid & operator+=(const Centroid & other)
|
||||
|
@ -53,8 +53,8 @@ class ReservoirSamplerDeterministic
|
||||
}
|
||||
|
||||
public:
|
||||
ReservoirSamplerDeterministic(const size_t sample_count = DEFAULT_SAMPLE_COUNT)
|
||||
: sample_count{sample_count}
|
||||
ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT)
|
||||
: sample_count{sample_count_}
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -13,8 +13,8 @@ namespace ErrorCodes
|
||||
extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
|
||||
}
|
||||
|
||||
ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s)
|
||||
: data(data_), s(s)
|
||||
ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s_)
|
||||
: data(data_), s(s_)
|
||||
{
|
||||
/// Squash Const of Const.
|
||||
while (const ColumnConst * const_data = typeid_cast<const ColumnConst *>(data.get()))
|
||||
|
@ -26,7 +26,7 @@ private:
|
||||
WrappedPtr data;
|
||||
size_t s;
|
||||
|
||||
ColumnConst(const ColumnPtr & data, size_t s);
|
||||
ColumnConst(const ColumnPtr & data, size_t s_);
|
||||
ColumnConst(const ColumnConst & src) = default;
|
||||
|
||||
public:
|
||||
|
@ -13,8 +13,8 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture)
|
||||
: size_(size), function(function)
|
||||
ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture)
|
||||
: size_(size), function(function_)
|
||||
{
|
||||
appendArguments(columns_to_capture);
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ class ColumnFunction final : public COWHelper<IColumn, ColumnFunction>
|
||||
private:
|
||||
friend class COWHelper<IColumn, ColumnFunction>;
|
||||
|
||||
ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture);
|
||||
ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture);
|
||||
|
||||
public:
|
||||
const char * getFamilyName() const override { return "Function"; }
|
||||
|
@ -360,12 +360,12 @@ bool ColumnLowCardinality::containsNull() const
|
||||
|
||||
ColumnLowCardinality::Index::Index() : positions(ColumnUInt8::create()), size_of_type(sizeof(UInt8)) {}
|
||||
|
||||
ColumnLowCardinality::Index::Index(MutableColumnPtr && positions) : positions(std::move(positions))
|
||||
ColumnLowCardinality::Index::Index(MutableColumnPtr && positions_) : positions(std::move(positions_))
|
||||
{
|
||||
updateSizeOfType();
|
||||
}
|
||||
|
||||
ColumnLowCardinality::Index::Index(ColumnPtr positions) : positions(std::move(positions))
|
||||
ColumnLowCardinality::Index::Index(ColumnPtr positions_) : positions(std::move(positions_))
|
||||
{
|
||||
updateSizeOfType();
|
||||
}
|
||||
|
@ -201,8 +201,8 @@ public:
|
||||
public:
|
||||
Index();
|
||||
Index(const Index & other) = default;
|
||||
explicit Index(MutableColumnPtr && positions);
|
||||
explicit Index(ColumnPtr positions);
|
||||
explicit Index(MutableColumnPtr && positions_);
|
||||
explicit Index(ColumnPtr positions_);
|
||||
|
||||
const ColumnPtr & getPositions() const { return positions; }
|
||||
WrappedPtr & getPositionsPtr() { return positions; }
|
||||
|
@ -257,8 +257,8 @@ struct ColumnTuple::Less
|
||||
TupleColumns columns;
|
||||
int nan_direction_hint;
|
||||
|
||||
Less(const TupleColumns & columns, int nan_direction_hint_)
|
||||
: columns(columns), nan_direction_hint(nan_direction_hint_)
|
||||
Less(const TupleColumns & columns_, int nan_direction_hint_)
|
||||
: columns(columns_), nan_direction_hint(nan_direction_hint_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -186,10 +186,10 @@ ColumnUnique<ColumnType>::ColumnUnique(const IDataType & type)
|
||||
}
|
||||
|
||||
template <typename ColumnType>
|
||||
ColumnUnique<ColumnType>::ColumnUnique(MutableColumnPtr && holder, bool is_nullable)
|
||||
ColumnUnique<ColumnType>::ColumnUnique(MutableColumnPtr && holder, bool is_nullable_)
|
||||
: column_holder(std::move(holder))
|
||||
, is_nullable(is_nullable)
|
||||
, index(numSpecialValues(is_nullable), 0)
|
||||
, is_nullable(is_nullable_)
|
||||
, index(numSpecialValues(is_nullable_), 0)
|
||||
{
|
||||
if (column_holder->size() < numSpecialValues())
|
||||
throw Exception("Too small holder column for ColumnUnique.", ErrorCodes::ILLEGAL_COLUMN);
|
||||
|
@ -235,8 +235,8 @@ template <typename IndexType, typename ColumnType>
|
||||
class ReverseIndex
|
||||
{
|
||||
public:
|
||||
explicit ReverseIndex(UInt64 num_prefix_rows_to_skip, UInt64 base_index)
|
||||
: num_prefix_rows_to_skip(num_prefix_rows_to_skip), base_index(base_index), saved_hash_ptr(nullptr) {}
|
||||
explicit ReverseIndex(UInt64 num_prefix_rows_to_skip_, UInt64 base_index_)
|
||||
: num_prefix_rows_to_skip(num_prefix_rows_to_skip_), base_index(base_index_), saved_hash_ptr(nullptr) {}
|
||||
|
||||
void setColumn(ColumnType * column_);
|
||||
|
||||
|
@ -265,13 +265,12 @@ using Allocator = AllocatorWithHint<clear_memory, AllocatorHints::DefaultHint, M
|
||||
#endif
|
||||
|
||||
/** Allocator with optimization to place small memory ranges in automatic memory.
|
||||
* TODO alignment
|
||||
*/
|
||||
template <typename Base, size_t N = 64>
|
||||
template <typename Base, size_t N = 64, size_t Alignment = 1>
|
||||
class AllocatorWithStackMemory : private Base
|
||||
{
|
||||
private:
|
||||
char stack_memory[N];
|
||||
alignas(Alignment) char stack_memory[N];
|
||||
|
||||
public:
|
||||
/// Do not use boost::noncopyable to avoid the warning about direct base
|
||||
@ -291,7 +290,7 @@ public:
|
||||
return stack_memory;
|
||||
}
|
||||
|
||||
return Base::alloc(size);
|
||||
return Base::alloc(size, Alignment);
|
||||
}
|
||||
|
||||
void free(void * buf, size_t size)
|
||||
@ -308,10 +307,10 @@ public:
|
||||
|
||||
/// Already was big enough to not fit in stack_memory.
|
||||
if (old_size > N)
|
||||
return Base::realloc(buf, old_size, new_size);
|
||||
return Base::realloc(buf, old_size, new_size, Alignment);
|
||||
|
||||
/// Was in stack memory, but now will not fit there.
|
||||
void * new_buf = Base::alloc(new_size);
|
||||
void * new_buf = Base::alloc(new_size, Alignment);
|
||||
memcpy(new_buf, buf, old_size);
|
||||
return new_buf;
|
||||
}
|
||||
|
@ -243,11 +243,11 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
throw Exception("Cache wasn't created for HashMethodSingleLowCardinalityColumn",
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
LowCardinalityDictionaryCache * cache;
|
||||
LowCardinalityDictionaryCache * lcd_cache;
|
||||
if constexpr (use_cache)
|
||||
{
|
||||
cache = typeid_cast<LowCardinalityDictionaryCache *>(context.get());
|
||||
if (!cache)
|
||||
lcd_cache = typeid_cast<LowCardinalityDictionaryCache *>(context.get());
|
||||
if (!lcd_cache)
|
||||
{
|
||||
const auto & cached_val = *context;
|
||||
throw Exception("Invalid type for HashMethodSingleLowCardinalityColumn cache: "
|
||||
@ -267,7 +267,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
{
|
||||
dictionary_key = {column->getDictionary().getHash(), dict->size()};
|
||||
if constexpr (use_cache)
|
||||
cached_values = cache->get(dictionary_key);
|
||||
cached_values = lcd_cache->get(dictionary_key);
|
||||
}
|
||||
|
||||
if (cached_values)
|
||||
@ -288,7 +288,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod
|
||||
cached_values->saved_hash = saved_hash;
|
||||
cached_values->dictionary_holder = dictionary_holder;
|
||||
|
||||
cache->set(dictionary_key, cached_values);
|
||||
lcd_cache->set(dictionary_key, cached_values);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -470,8 +470,8 @@ struct HashMethodKeysFixed
|
||||
Sizes key_sizes;
|
||||
size_t keys_size;
|
||||
|
||||
HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes, const HashMethodContextPtr &)
|
||||
: Base(key_columns), key_sizes(std::move(key_sizes)), keys_size(key_columns.size())
|
||||
HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes_, const HashMethodContextPtr &)
|
||||
: Base(key_columns), key_sizes(std::move(key_sizes_)), keys_size(key_columns.size())
|
||||
{
|
||||
if constexpr (has_low_cardinality)
|
||||
{
|
||||
@ -525,8 +525,8 @@ struct HashMethodSerialized
|
||||
ColumnRawPtrs key_columns;
|
||||
size_t keys_size;
|
||||
|
||||
HashMethodSerialized(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &)
|
||||
: key_columns(key_columns), keys_size(key_columns.size()) {}
|
||||
HashMethodSerialized(const ColumnRawPtrs & key_columns_, const Sizes & /*key_sizes*/, const HashMethodContextPtr &)
|
||||
: key_columns(key_columns_), keys_size(key_columns_.size()) {}
|
||||
|
||||
protected:
|
||||
friend class columns_hashing_impl::HashMethodBase<Self, Value, Mapped, false>;
|
||||
@ -550,8 +550,8 @@ struct HashMethodHashed
|
||||
|
||||
ColumnRawPtrs key_columns;
|
||||
|
||||
HashMethodHashed(ColumnRawPtrs key_columns, const Sizes &, const HashMethodContextPtr &)
|
||||
: key_columns(std::move(key_columns)) {}
|
||||
HashMethodHashed(ColumnRawPtrs key_columns_, const Sizes &, const HashMethodContextPtr &)
|
||||
: key_columns(std::move(key_columns_)) {}
|
||||
|
||||
ALWAYS_INLINE Key getKey(size_t row, Arena &) const { return hash128(row, key_columns.size(), key_columns); }
|
||||
|
||||
|
@ -56,8 +56,8 @@ class EmplaceResultImpl
|
||||
bool inserted;
|
||||
|
||||
public:
|
||||
EmplaceResultImpl(Mapped & value, Mapped & cached_value, bool inserted)
|
||||
: value(value), cached_value(cached_value), inserted(inserted) {}
|
||||
EmplaceResultImpl(Mapped & value_, Mapped & cached_value_, bool inserted_)
|
||||
: value(value_), cached_value(cached_value_), inserted(inserted_) {}
|
||||
|
||||
bool isInserted() const { return inserted; }
|
||||
auto & getMapped() const { return value; }
|
||||
@ -75,7 +75,7 @@ class EmplaceResultImpl<void>
|
||||
bool inserted;
|
||||
|
||||
public:
|
||||
explicit EmplaceResultImpl(bool inserted) : inserted(inserted) {}
|
||||
explicit EmplaceResultImpl(bool inserted_) : inserted(inserted_) {}
|
||||
bool isInserted() const { return inserted; }
|
||||
};
|
||||
|
||||
@ -86,7 +86,7 @@ class FindResultImpl
|
||||
bool found;
|
||||
|
||||
public:
|
||||
FindResultImpl(Mapped * value, bool found) : value(value), found(found) {}
|
||||
FindResultImpl(Mapped * value_, bool found_) : value(value_), found(found_) {}
|
||||
bool isFound() const { return found; }
|
||||
Mapped & getMapped() const { return *value; }
|
||||
};
|
||||
@ -97,7 +97,7 @@ class FindResultImpl<void>
|
||||
bool found;
|
||||
|
||||
public:
|
||||
explicit FindResultImpl(bool found) : found(found) {}
|
||||
explicit FindResultImpl(bool found_) : found(found_) {}
|
||||
bool isFound() const { return found; }
|
||||
};
|
||||
|
||||
|
@ -59,15 +59,15 @@ namespace CurrentMetrics
|
||||
std::atomic<Value> * what;
|
||||
Value amount;
|
||||
|
||||
Increment(std::atomic<Value> * what, Value amount)
|
||||
: what(what), amount(amount)
|
||||
Increment(std::atomic<Value> * what_, Value amount_)
|
||||
: what(what_), amount(amount_)
|
||||
{
|
||||
*what += amount;
|
||||
}
|
||||
|
||||
public:
|
||||
Increment(Metric metric, Value amount = 1)
|
||||
: Increment(&values[metric], amount) {}
|
||||
Increment(Metric metric, Value amount_ = 1)
|
||||
: Increment(&values[metric], amount_) {}
|
||||
|
||||
~Increment()
|
||||
{
|
||||
|
@ -26,6 +26,11 @@ void CurrentThread::updatePerformanceCounters()
|
||||
current_thread->updatePerformanceCounters();
|
||||
}
|
||||
|
||||
bool CurrentThread::isInitialized()
|
||||
{
|
||||
return current_thread;
|
||||
}
|
||||
|
||||
ThreadStatus & CurrentThread::get()
|
||||
{
|
||||
if (unlikely(!current_thread))
|
||||
|
@ -33,6 +33,9 @@ class InternalTextLogsQueue;
|
||||
class CurrentThread
|
||||
{
|
||||
public:
|
||||
/// Return true in case of successful initializaiton
|
||||
static bool isInitialized();
|
||||
|
||||
/// Handler to current thread
|
||||
static ThreadStatus & get();
|
||||
|
||||
|
@ -55,8 +55,8 @@ Elf::Elf(const std::string & path)
|
||||
}
|
||||
|
||||
|
||||
Elf::Section::Section(const ElfShdr & header, const Elf & elf)
|
||||
: header(header), elf(elf)
|
||||
Elf::Section::Section(const ElfShdr & header_, const Elf & elf_)
|
||||
: header(header_), elf(elf_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -35,7 +35,7 @@ public:
|
||||
const char * end() const;
|
||||
size_t size() const;
|
||||
|
||||
Section(const ElfShdr & header, const Elf & elf);
|
||||
Section(const ElfShdr & header_, const Elf & elf_);
|
||||
|
||||
private:
|
||||
const Elf & elf;
|
||||
|
@ -442,8 +442,9 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_PARSE_DWARF = 465;
|
||||
extern const int INSECURE_PATH = 466;
|
||||
extern const int CANNOT_PARSE_BOOL = 467;
|
||||
extern const int SETTINGS_ARE_NOT_SUPPORTED = 468;
|
||||
extern const int IMMUTABLE_SETTING = 469;
|
||||
extern const int CANNOT_PTHREAD_ATTR = 468;
|
||||
extern const int SETTINGS_ARE_NOT_SUPPORTED = 469;
|
||||
extern const int IMMUTABLE_SETTING = 470;
|
||||
|
||||
extern const int KEEPER_EXCEPTION = 999;
|
||||
extern const int POCO_EXCEPTION = 1000;
|
||||
|
@ -167,7 +167,7 @@ String FieldVisitorToString::operator() (const Tuple & x_def) const
|
||||
}
|
||||
|
||||
|
||||
FieldVisitorHash::FieldVisitorHash(SipHash & hash) : hash(hash) {}
|
||||
FieldVisitorHash::FieldVisitorHash(SipHash & hash_) : hash(hash_) {}
|
||||
|
||||
void FieldVisitorHash::operator() (const Null &) const
|
||||
{
|
||||
|
@ -222,7 +222,7 @@ class FieldVisitorHash : public StaticVisitor<>
|
||||
private:
|
||||
SipHash & hash;
|
||||
public:
|
||||
FieldVisitorHash(SipHash & hash);
|
||||
FieldVisitorHash(SipHash & hash_);
|
||||
|
||||
void operator() (const Null & x) const;
|
||||
void operator() (const UInt64 & x) const;
|
||||
|
@ -31,9 +31,9 @@ class MemoryTracker
|
||||
const char * description = nullptr;
|
||||
|
||||
public:
|
||||
MemoryTracker(VariableContext level = VariableContext::Thread) : level(level) {}
|
||||
MemoryTracker(Int64 limit_, VariableContext level = VariableContext::Thread) : limit(limit_), level(level) {}
|
||||
MemoryTracker(MemoryTracker * parent_, VariableContext level = VariableContext::Thread) : parent(parent_), level(level) {}
|
||||
MemoryTracker(VariableContext level_ = VariableContext::Thread) : level(level_) {}
|
||||
MemoryTracker(Int64 limit_, VariableContext level_ = VariableContext::Thread) : limit(limit_), level(level_) {}
|
||||
MemoryTracker(MemoryTracker * parent_, VariableContext level_ = VariableContext::Thread) : parent(parent_), level(level_) {}
|
||||
|
||||
~MemoryTracker();
|
||||
|
||||
|
@@ -636,6 +636,6 @@ using PaddedPODArray = PODArray<T, initial_bytes, TAllocator, 15, 16>;

template <typename T, size_t inline_bytes,
    size_t rounded_bytes = integerRoundUp(inline_bytes, sizeof(T))>
using PODArrayWithStackMemory = PODArray<T, rounded_bytes,
    AllocatorWithStackMemory<Allocator<false>, rounded_bytes>>;
    AllocatorWithStackMemory<Allocator<false>, rounded_bytes, alignof(T)>>;

}
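Passing `alignof(T)` here goes together with the new `Alignment` template parameter on `AllocatorWithStackMemory` shown earlier in this diff: the inline `char stack_memory[N]` buffer is only guaranteed byte alignment unless it is declared `alignas(Alignment)`, and handing it out for an over-aligned element type would be undefined behaviour. A reduced illustration of the idea (not the real allocator, which also handles realloc and memory tracking):

```cpp
#include <cstddef>
#include <cstdint>
#include <new>

/// Small allocations are served from an aligned buffer inside the object,
/// larger ones fall back to the heap.
template <size_t N = 64, size_t Alignment = 1>
class StackMemoryAllocator
{
    alignas(Alignment) char stack_memory[N];

public:
    void * alloc(size_t size)
    {
        if (size <= N)
            return stack_memory;
        return ::operator new(size, std::align_val_t(Alignment));
    }

    void free(void * buf, size_t size)
    {
        (void)size;
        if (buf != stack_memory)
            ::operator delete(buf, std::align_val_t(Alignment));
    }
};

int main()
{
    /// With Alignment = 1 (the old behaviour) the inline buffer could start at any address;
    /// requesting alignof(double) makes it safe to place doubles there.
    StackMemoryAllocator<64, alignof(double)> allocator;

    void * buf = allocator.alloc(4 * sizeof(double));
    auto * values = static_cast<double *>(buf);
    for (int i = 0; i < 4; ++i)
        values[i] = i * 0.5;

    bool aligned = reinterpret_cast<uintptr_t>(buf) % alignof(double) == 0;
    allocator.free(buf, 4 * sizeof(double));
    return aligned ? 0 : 1;
}
```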
@ -191,10 +191,10 @@ Counters global_counters(global_counters_array);
|
||||
const Event Counters::num_counters = END;
|
||||
|
||||
|
||||
Counters::Counters(VariableContext level, Counters * parent)
|
||||
Counters::Counters(VariableContext level_, Counters * parent_)
|
||||
: counters_holder(new Counter[num_counters] {}),
|
||||
parent(parent),
|
||||
level(level)
|
||||
parent(parent_),
|
||||
level(level_)
|
||||
{
|
||||
counters = counters_holder.get();
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ namespace ProfileEvents
|
||||
VariableContext level = VariableContext::Thread;
|
||||
|
||||
/// By default, any instance have to increment global counters
|
||||
Counters(VariableContext level = VariableContext::Thread, Counters * parent = &global_counters);
|
||||
Counters(VariableContext level_ = VariableContext::Thread, Counters * parent_ = &global_counters);
|
||||
|
||||
/// Global level static initializer
|
||||
Counters(Counter * allocated_counters)
|
||||
|
@ -127,9 +127,9 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
template <typename ProfilerImpl>
|
||||
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal)
|
||||
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_)
|
||||
: log(&Logger::get("QueryProfiler"))
|
||||
, pause_signal(pause_signal)
|
||||
, pause_signal(pause_signal_)
|
||||
{
|
||||
#if USE_INTERNAL_UNWIND_LIBRARY
|
||||
/// Sanity check.
|
||||
|
@ -35,7 +35,7 @@ template <typename ProfilerImpl>
|
||||
class QueryProfilerBase
|
||||
{
|
||||
public:
|
||||
QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal);
|
||||
QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_);
|
||||
~QueryProfilerBase();
|
||||
|
||||
private:
|
||||
|
@ -161,9 +161,9 @@ RWLockImpl::LockHolderImpl::~LockHolderImpl()
|
||||
}
|
||||
|
||||
|
||||
RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent, RWLockImpl::GroupsContainer::iterator it_group,
|
||||
RWLockImpl::ClientsContainer::iterator it_client)
|
||||
: parent{std::move(parent)}, it_group{it_group}, it_client{it_client},
|
||||
RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent_, RWLockImpl::GroupsContainer::iterator it_group_,
|
||||
RWLockImpl::ClientsContainer::iterator it_client_)
|
||||
: parent{std::move(parent_)}, it_group{it_group_}, it_client{it_client_},
|
||||
active_client_increment{(*it_client == RWLockImpl::Read) ? CurrentMetrics::RWLockActiveReaders
|
||||
: CurrentMetrics::RWLockActiveWriters}
|
||||
{}
|
||||
|
@ -68,7 +68,7 @@ private:
|
||||
|
||||
std::condition_variable cv; /// all clients of the group wait group condvar
|
||||
|
||||
explicit Group(Type type) : type{type} {}
|
||||
explicit Group(Type type_) : type{type_} {}
|
||||
};
|
||||
|
||||
mutable std::mutex mutex;
|
||||
|
@ -34,13 +34,13 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_CREATE_CHILD_PROCESS;
|
||||
}
|
||||
|
||||
ShellCommand::ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_)
|
||||
: pid(pid)
|
||||
ShellCommand::ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_)
|
||||
: pid(pid_)
|
||||
, terminate_in_destructor(terminate_in_destructor_)
|
||||
, log(&Poco::Logger::get("ShellCommand"))
|
||||
, in(in_fd)
|
||||
, out(out_fd)
|
||||
, err(err_fd) {}
|
||||
, in(in_fd_)
|
||||
, out(out_fd_)
|
||||
, err(err_fd_) {}
|
||||
|
||||
ShellCommand::~ShellCommand()
|
||||
{
|
||||
|
@ -32,7 +32,7 @@ private:
|
||||
|
||||
Poco::Logger * log;
|
||||
|
||||
ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_);
|
||||
ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_);
|
||||
|
||||
static std::unique_ptr<ShellCommand> executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor);
|
||||
|
||||
|
@ -113,7 +113,8 @@ public:
|
||||
}
|
||||
|
||||
TKey key;
|
||||
size_t slot, hash;
|
||||
size_t slot;
|
||||
size_t hash;
|
||||
UInt64 count;
|
||||
UInt64 error;
|
||||
};
|
||||
@ -147,15 +148,13 @@ public:
|
||||
void insert(const TKey & key, UInt64 increment = 1, UInt64 error = 0)
|
||||
{
|
||||
// Increase weight of a key that already exists
|
||||
// The hash table is used both for value mapping and as a presence test (c_i != 0)
|
||||
auto hash = counter_map.hash(key);
|
||||
auto it = counter_map.find(key, hash);
|
||||
if (it != counter_map.end())
|
||||
auto counter = findCounter(key, hash);
|
||||
if (counter)
|
||||
{
|
||||
auto c = it->getSecond();
|
||||
c->count += increment;
|
||||
c->error += error;
|
||||
percolate(c);
|
||||
counter->count += increment;
|
||||
counter->error += error;
|
||||
percolate(counter);
|
||||
return;
|
||||
}
|
||||
// Key doesn't exist, but can fit in the top K
|
||||
@ -177,6 +176,7 @@ public:
|
||||
push(new Counter(arena.emplace(key), increment, error, hash));
|
||||
return;
|
||||
}
|
||||
|
||||
const size_t alpha_mask = alpha_map.size() - 1;
|
||||
auto & alpha = alpha_map[hash & alpha_mask];
|
||||
if (alpha + increment < min->count)
|
||||
@ -187,22 +187,9 @@ public:
|
||||
|
||||
// Erase the current minimum element
|
||||
alpha_map[min->hash & alpha_mask] = min->count;
|
||||
it = counter_map.find(min->key, min->hash);
|
||||
destroyLastElement();
|
||||
|
||||
// Replace minimum with newly inserted element
|
||||
if (it != counter_map.end())
|
||||
{
|
||||
arena.free(min->key);
|
||||
min->hash = hash;
|
||||
min->key = arena.emplace(key);
|
||||
min->count = alpha + increment;
|
||||
min->error = alpha + error;
|
||||
percolate(min);
|
||||
|
||||
it->getSecond() = min;
|
||||
it->getFirstMutable() = min->key;
|
||||
counter_map.reinsert(it, hash);
|
||||
}
|
||||
push(new Counter(arena.emplace(key), alpha + increment, alpha + error, hash));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -242,17 +229,35 @@ public:
|
||||
// The list is sorted in descending order, we have to scan in reverse
|
||||
for (auto counter : boost::adaptors::reverse(rhs.counter_list))
|
||||
{
|
||||
if (counter_map.find(counter->key) != counter_map.end())
|
||||
size_t hash = counter_map.hash(counter->key);
|
||||
if (auto current = findCounter(counter->key, hash))
|
||||
{
|
||||
// Subtract m2 previously added, guaranteed not negative
|
||||
insert(counter->key, counter->count - m2, counter->error - m2);
|
||||
current->count += (counter->count - m2);
|
||||
current->error += (counter->error - m2);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Counters not monitored in S1
|
||||
insert(counter->key, counter->count + m1, counter->error + m1);
|
||||
counter_list.push_back(new Counter(arena.emplace(counter->key), counter->count + m1, counter->error + m1, hash));
|
||||
}
|
||||
}
|
||||
|
||||
std::sort(counter_list.begin(), counter_list.end(), [](Counter * l, Counter * r) { return *l > *r; });
|
||||
|
||||
if (counter_list.size() > m_capacity)
|
||||
{
|
||||
for (size_t i = m_capacity; i < counter_list.size(); ++i)
|
||||
{
|
||||
arena.free(counter_list[i]->key);
|
||||
delete counter_list[i];
|
||||
}
|
||||
counter_list.resize(m_capacity);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < counter_list.size(); ++i)
|
||||
counter_list[i]->slot = i;
|
||||
rebuildCounterMap();
|
||||
}
|
||||
|
||||
std::vector<Counter> topK(size_t k) const
|
||||
@ -336,7 +341,10 @@ private:
|
||||
void destroyElements()
|
||||
{
|
||||
for (auto counter : counter_list)
|
||||
{
|
||||
arena.free(counter->key);
|
||||
delete counter;
|
||||
}
|
||||
|
||||
counter_map.clear();
|
||||
counter_list.clear();
|
||||
@ -346,19 +354,40 @@ private:
|
||||
void destroyLastElement()
|
||||
{
|
||||
auto last_element = counter_list.back();
|
||||
auto cell = counter_map.find(last_element->key, last_element->hash);
|
||||
cell->setZero();
|
||||
counter_map.reinsert(cell, last_element->hash);
|
||||
counter_list.pop_back();
|
||||
arena.free(last_element->key);
|
||||
delete last_element;
|
||||
counter_list.pop_back();
|
||||
|
||||
++removed_keys;
|
||||
if (removed_keys * 2 > counter_map.size())
|
||||
rebuildCounterMap();
|
||||
}
|
||||
|
||||
HashMap<TKey, Counter *, Hash, Grower, Allocator> counter_map;
|
||||
Counter * findCounter(const TKey & key, size_t hash)
|
||||
{
|
||||
auto it = counter_map.find(key, hash);
|
||||
if (it == counter_map.end())
|
||||
return nullptr;
|
||||
|
||||
return it->getSecond();
|
||||
}
|
||||
|
||||
void rebuildCounterMap()
|
||||
{
|
||||
removed_keys = 0;
|
||||
counter_map.clear();
|
||||
for (auto counter : counter_list)
|
||||
counter_map[counter->key] = counter;
|
||||
}
|
||||
|
||||
using CounterMap = HashMap<TKey, Counter *, Hash, Grower, Allocator>;
|
||||
|
||||
CounterMap counter_map;
|
||||
std::vector<Counter *> counter_list;
|
||||
std::vector<UInt64> alpha_map;
|
||||
SpaceSavingArena<TKey> arena;
|
||||
size_t m_capacity;
|
||||
size_t removed_keys = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
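For orientation, a minimal sketch of the classic Space-Saving insert step that the class above implements could look like the following (illustrative only: std::unordered_map stands in for HashMap, and the arena, alpha_map and percolation details are omitted; all names are hypothetical):

#include <algorithm>
#include <cstdint>
#include <string>
#include <unordered_map>

/// Minimal sketch of the Space-Saving top-K insert step (illustrative names only).
class TinySpaceSaving
{
public:
    explicit TinySpaceSaving(size_t capacity_) : capacity(capacity_) {}

    void insert(const std::string & key, uint64_t increment = 1)
    {
        auto it = counters.find(key);
        if (it != counters.end())
        {
            it->second.count += increment;          /// Key is already monitored.
            return;
        }

        if (counters.size() < capacity)
        {
            counters[key] = Value{increment, 0};    /// Still room in the top K.
            return;
        }

        /// Evict the current minimum; the new key inherits its count
        /// as an upper bound on the estimation error.
        auto min_it = std::min_element(counters.begin(), counters.end(),
            [](const auto & l, const auto & r) { return l.second.count < r.second.count; });
        const uint64_t min_count = min_it->second.count;
        counters.erase(min_it);
        counters[key] = Value{min_count + increment, min_count};
    }

private:
    struct Value { uint64_t count = 0; uint64_t error = 0; };
    std::unordered_map<std::string, Value> counters;
    size_t capacity;
};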
@ -86,7 +86,7 @@ public:
|
||||
|
||||
operator bool() const { return parent != nullptr; }
|
||||
|
||||
Lock(AtomicStopwatch * parent) : parent(parent) {}
|
||||
Lock(AtomicStopwatch * parent_) : parent(parent_) {}
|
||||
|
||||
Lock(Lock &&) = default;
|
||||
|
||||
|
@ -75,8 +75,8 @@ private:
|
||||
#endif
|
||||
|
||||
public:
|
||||
StringSearcher(const char * const needle_, const size_t needle_size)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}, needle_size{needle_size}
|
||||
StringSearcher(const char * const needle_, const size_t needle_size_)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}, needle_size{needle_size_}
|
||||
{
|
||||
if (0 == needle_size)
|
||||
return;
|
||||
@ -714,8 +714,8 @@ struct LibCASCIICaseSensitiveStringSearcher
|
||||
{
|
||||
const char * const needle;
|
||||
|
||||
LibCASCIICaseSensitiveStringSearcher(const char * const needle, const size_t /* needle_size */)
|
||||
: needle(needle) {}
|
||||
LibCASCIICaseSensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */)
|
||||
: needle(needle_) {}
|
||||
|
||||
const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const
|
||||
{
|
||||
@ -735,8 +735,8 @@ struct LibCASCIICaseInsensitiveStringSearcher
|
||||
{
|
||||
const char * const needle;
|
||||
|
||||
LibCASCIICaseInsensitiveStringSearcher(const char * const needle, const size_t /* needle_size */)
|
||||
: needle(needle) {}
|
||||
LibCASCIICaseInsensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */)
|
||||
: needle(needle_) {}
|
||||
|
||||
const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const
|
||||
{
|
||||
|
@ -22,14 +22,14 @@ namespace CurrentMetrics
|
||||
|
||||
|
||||
template <typename Thread>
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads)
|
||||
: ThreadPoolImpl(max_threads, max_threads, max_threads)
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads_)
|
||||
: ThreadPoolImpl(max_threads_, max_threads_, max_threads_)
|
||||
{
|
||||
}
|
||||
|
||||
template <typename Thread>
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
: max_threads(max_threads), max_free_threads(max_free_threads), queue_size(queue_size)
|
||||
ThreadPoolImpl<Thread>::ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_)
|
||||
: max_threads(max_threads_), max_free_threads(max_free_threads_), queue_size(queue_size_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -31,10 +31,10 @@ public:
|
||||
using Job = std::function<void()>;
|
||||
|
||||
/// Size is constant. Up to max_threads are created on demand and then run until shutdown.
|
||||
explicit ThreadPoolImpl(size_t max_threads);
|
||||
explicit ThreadPoolImpl(size_t max_threads_);
|
||||
|
||||
/// queue_size - maximum number of running plus scheduled jobs. It can be greater than max_threads. Zero means unlimited.
|
||||
ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_);
|
||||
|
||||
/// Add new job. Locks until number of scheduled jobs is less than maximum or exception in one of threads was thrown.
|
||||
/// If an exception in some thread was thrown, method silently returns, and exception will be rethrown only on call to 'wait' function.
|
||||
@ -81,8 +81,8 @@ private:
|
||||
Job job;
|
||||
int priority;
|
||||
|
||||
JobWithPriority(Job job, int priority)
|
||||
: job(job), priority(priority) {}
|
||||
JobWithPriority(Job job_, int priority_)
|
||||
: job(job_), priority(priority_) {}
|
||||
|
||||
bool operator< (const JobWithPriority & rhs) const
|
||||
{
|
||||
|
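As a usage illustration of the three-argument constructor and the schedule()/wait() contract described in the comments above (assuming the usual ThreadPool alias for this template; the job body is purely illustrative):

#include <Common/ThreadPool.h>
#include <atomic>

int main()
{
    /// 4 worker threads, up to 4 idle threads kept alive, at most 16 running + queued jobs.
    ThreadPool pool(4, 4, 16);

    std::atomic<size_t> sum{0};
    for (size_t i = 0; i < 100; ++i)
        pool.schedule([i, &sum] { sum += i; });

    /// Blocks until all jobs finish; an exception thrown in a worker is rethrown here.
    pool.wait();

    return sum == 4950 ? 0 : 1;
}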
@ -36,12 +36,12 @@ namespace ErrorCodes
|
||||
class Throttler
|
||||
{
|
||||
public:
|
||||
Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent = nullptr)
|
||||
: max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent) {}
|
||||
Throttler(size_t max_speed_, const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {}
|
||||
|
||||
Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_,
|
||||
const std::shared_ptr<Throttler> & parent = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent) {}
|
||||
const std::shared_ptr<Throttler> & parent_ = nullptr)
|
||||
: max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {}
|
||||
|
||||
void add(const size_t amount)
|
||||
{
|
||||
|
@ -28,9 +28,9 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_FCNTL;
|
||||
}
|
||||
|
||||
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log)
|
||||
TraceCollector::TraceCollector(std::shared_ptr<TraceLog> & trace_log_)
|
||||
: log(&Poco::Logger::get("TraceCollector"))
|
||||
, trace_log(trace_log)
|
||||
, trace_log(trace_log_)
|
||||
{
|
||||
if (trace_log == nullptr)
|
||||
throw Exception("Invalid trace log pointer passed", ErrorCodes::NULL_POINTER_DEREFERENCE);
|
||||
|
@ -24,7 +24,7 @@ private:
|
||||
static void notifyToStop();
|
||||
|
||||
public:
|
||||
TraceCollector(std::shared_ptr<TraceLog> & trace_log);
|
||||
TraceCollector(std::shared_ptr<TraceLog> & trace_log_);
|
||||
|
||||
~TraceCollector();
|
||||
};
|
||||
|
@ -28,7 +28,7 @@ struct UInt128
|
||||
UInt64 high;
|
||||
|
||||
UInt128() = default;
|
||||
explicit UInt128(const UInt64 low, const UInt64 high) : low(low), high(high) {}
|
||||
explicit UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) {}
|
||||
explicit UInt128(const UInt64 rhs) : low(rhs), high() {}
|
||||
|
||||
auto tuple() const { return std::tie(high, low); }
|
||||
|
@ -331,11 +331,11 @@ public:
|
||||
* If you specify it small enough, the fallback algorithm will be used,
|
||||
* since initializing the hash table is then considered a waste of time.
|
||||
*/
|
||||
VolnitskyBase(const char * const needle, const size_t needle_size, size_t haystack_size_hint = 0)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle)}
|
||||
, needle_size{needle_size}
|
||||
VolnitskyBase(const char * const needle_, const size_t needle_size_, size_t haystack_size_hint = 0)
|
||||
: needle{reinterpret_cast<const UInt8 *>(needle_)}
|
||||
, needle_size{needle_size_}
|
||||
, fallback{VolnitskyTraits::isFallbackNeedle(needle_size, haystack_size_hint)}
|
||||
, fallback_searcher{needle, needle_size}
|
||||
, fallback_searcher{needle_, needle_size}
|
||||
{
|
||||
if (fallback)
|
||||
return;
|
||||
|
@ -23,8 +23,8 @@ namespace ProfileEvents
|
||||
namespace Coordination
|
||||
{
|
||||
|
||||
Exception::Exception(const std::string & msg, const int32_t code, int)
|
||||
: DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code)
|
||||
Exception::Exception(const std::string & msg, const int32_t code_, int)
|
||||
: DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code_)
|
||||
{
|
||||
if (Coordination::isUserError(code))
|
||||
ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions);
|
||||
@ -34,18 +34,18 @@ Exception::Exception(const std::string & msg, const int32_t code, int)
|
||||
ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions);
|
||||
}
|
||||
|
||||
Exception::Exception(const std::string & msg, const int32_t code)
|
||||
: Exception(msg + " (" + errorMessage(code) + ")", code, 0)
|
||||
Exception::Exception(const std::string & msg, const int32_t code_)
|
||||
: Exception(msg + " (" + errorMessage(code_) + ")", code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
Exception::Exception(const int32_t code)
|
||||
: Exception(errorMessage(code), code, 0)
|
||||
Exception::Exception(const int32_t code_)
|
||||
: Exception(errorMessage(code_), code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
Exception::Exception(const int32_t code, const std::string & path)
|
||||
: Exception(std::string{errorMessage(code)} + ", path: " + path, code, 0)
|
||||
Exception::Exception(const int32_t code_, const std::string & path)
|
||||
: Exception(std::string{errorMessage(code_)} + ", path: " + path, code_, 0)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -301,12 +301,12 @@ class Exception : public DB::Exception
|
||||
{
|
||||
private:
|
||||
/// Delegate constructor, used to minimize repetition; last parameter used for overload resolution.
|
||||
Exception(const std::string & msg, const int32_t code, int);
|
||||
Exception(const std::string & msg, const int32_t code_, int);
|
||||
|
||||
public:
|
||||
explicit Exception(const int32_t code);
|
||||
Exception(const std::string & msg, const int32_t code);
|
||||
Exception(const int32_t code, const std::string & path);
|
||||
explicit Exception(const int32_t code_);
|
||||
Exception(const std::string & msg, const int32_t code_);
|
||||
Exception(const int32_t code_, const std::string & path);
|
||||
Exception(const Exception & exc);
|
||||
|
||||
const char * name() const throw() override { return "Coordination::Exception"; }
|
||||
|
@ -418,8 +418,8 @@ ResponsePtr TestKeeperCheckRequest::createResponse() const { return std::make_sh
|
||||
ResponsePtr TestKeeperMultiRequest::createResponse() const { return std::make_shared<MultiResponse>(); }
|
||||
|
||||
|
||||
TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout)
|
||||
: root_path(root_path_), operation_timeout(operation_timeout)
|
||||
TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_)
|
||||
: root_path(root_path_), operation_timeout(operation_timeout_)
|
||||
{
|
||||
container.emplace("/", Node());
|
||||
|
||||
|
@ -33,7 +33,7 @@ using TestKeeperRequestPtr = std::shared_ptr<TestKeeperRequest>;
|
||||
class TestKeeper : public IKeeper
|
||||
{
|
||||
public:
|
||||
TestKeeper(const String & root_path, Poco::Timespan operation_timeout);
|
||||
TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_);
|
||||
~TestKeeper() override;
|
||||
|
||||
bool isExpired() const override { return expired; }
|
||||
|
@ -106,10 +106,10 @@ void ZooKeeper::init(const std::string & implementation, const std::string & hos
|
||||
throw KeeperException("Zookeeper root doesn't exist. You should create root node " + chroot + " before start.", Coordination::ZNONODE);
|
||||
}
|
||||
|
||||
ZooKeeper::ZooKeeper(const std::string & hosts, const std::string & identity, int32_t session_timeout_ms,
|
||||
int32_t operation_timeout_ms, const std::string & chroot, const std::string & implementation)
|
||||
ZooKeeper::ZooKeeper(const std::string & hosts_, const std::string & identity_, int32_t session_timeout_ms_,
|
||||
int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation)
|
||||
{
|
||||
init(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot);
|
||||
init(implementation, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_);
|
||||
}
|
||||
|
||||
struct ZooKeeperArgs
|
||||
@ -891,9 +891,9 @@ size_t KeeperMultiException::getFailedOpIndex(int32_t exception_code, const Coor
|
||||
}
|
||||
|
||||
|
||||
KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests, const Coordination::Responses & responses)
|
||||
KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_)
|
||||
: KeeperException("Transaction failed", exception_code),
|
||||
requests(requests), responses(responses), failed_op_index(getFailedOpIndex(exception_code, responses))
|
||||
requests(requests_), responses(responses_), failed_op_index(getFailedOpIndex(exception_code, responses))
|
||||
{
|
||||
addMessage("Op #" + std::to_string(failed_op_index) + ", path: " + getPathForFirstFailedOp());
|
||||
}
|
||||
|
@ -52,10 +52,10 @@ class ZooKeeper
|
||||
public:
|
||||
using Ptr = std::shared_ptr<ZooKeeper>;
|
||||
|
||||
ZooKeeper(const std::string & hosts, const std::string & identity = "",
|
||||
int32_t session_timeout_ms = DEFAULT_SESSION_TIMEOUT,
|
||||
int32_t operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT,
|
||||
const std::string & chroot = "",
|
||||
ZooKeeper(const std::string & hosts_, const std::string & identity_ = "",
|
||||
int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT,
|
||||
int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT,
|
||||
const std::string & chroot_ = "",
|
||||
const std::string & implementation = "zookeeper");
|
||||
|
||||
/** Config of the form:
|
||||
|
@ -758,17 +758,17 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
|
||||
{
|
||||
ZooKeeper::OpNum op_num;
|
||||
bool done;
|
||||
int32_t error;
|
||||
int32_t error_;
|
||||
|
||||
Coordination::read(op_num, in);
|
||||
Coordination::read(done, in);
|
||||
Coordination::read(error, in);
|
||||
Coordination::read(error_, in);
|
||||
|
||||
if (!done)
|
||||
throw Exception("Too many results received for multi transaction", ZMARSHALLINGERROR);
|
||||
if (op_num != -1)
|
||||
throw Exception("Unexpected op_num received at the end of results for multi transaction", ZMARSHALLINGERROR);
|
||||
if (error != -1)
|
||||
if (error_ != -1)
|
||||
throw Exception("Unexpected error value received at the end of results for multi transaction", ZMARSHALLINGERROR);
|
||||
}
|
||||
}
|
||||
@ -821,12 +821,12 @@ ZooKeeper::ZooKeeper(
|
||||
const String & root_path_,
|
||||
const String & auth_scheme,
|
||||
const String & auth_data,
|
||||
Poco::Timespan session_timeout,
|
||||
Poco::Timespan session_timeout_,
|
||||
Poco::Timespan connection_timeout,
|
||||
Poco::Timespan operation_timeout)
|
||||
Poco::Timespan operation_timeout_)
|
||||
: root_path(root_path_),
|
||||
session_timeout(session_timeout),
|
||||
operation_timeout(std::min(operation_timeout, session_timeout))
|
||||
session_timeout(session_timeout_),
|
||||
operation_timeout(std::min(operation_timeout_, session_timeout_))
|
||||
{
|
||||
if (!root_path.empty())
|
||||
{
|
||||
|
@ -108,9 +108,9 @@ public:
|
||||
const String & root_path,
|
||||
const String & auth_scheme,
|
||||
const String & auth_data,
|
||||
Poco::Timespan session_timeout,
|
||||
Poco::Timespan session_timeout_,
|
||||
Poco::Timespan connection_timeout,
|
||||
Poco::Timespan operation_timeout);
|
||||
Poco::Timespan operation_timeout_);
|
||||
|
||||
~ZooKeeper() override;
|
||||
|
||||
|
62
dbms/src/Common/checkStackSize.cpp
Normal file
@ -0,0 +1,62 @@
|
||||
#include <Common/checkStackSize.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <ext/scope_guard.h>
|
||||
|
||||
#include <pthread.h>
|
||||
#include <cstdint>
|
||||
#include <sstream>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int CANNOT_PTHREAD_ATTR;
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int TOO_DEEP_RECURSION;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static thread_local void * stack_address = nullptr;
|
||||
static thread_local size_t max_stack_size = 0;
|
||||
|
||||
void checkStackSize()
|
||||
{
|
||||
using namespace DB;
|
||||
|
||||
if (!stack_address)
|
||||
{
|
||||
pthread_attr_t attr;
|
||||
if (0 != pthread_getattr_np(pthread_self(), &attr))
|
||||
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
||||
|
||||
SCOPE_EXIT({ pthread_attr_destroy(&attr); });
|
||||
|
||||
if (0 != pthread_attr_getstack(&attr, &stack_address, &max_stack_size))
|
||||
throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
|
||||
}
|
||||
|
||||
const void * frame_address = __builtin_frame_address(0);
|
||||
uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
|
||||
uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
|
||||
|
||||
/// We assume that the stack grows towards lower addresses and starts at the end of a memory chunk of max_stack_size bytes.
|
||||
if (int_frame_address > int_stack_address + max_stack_size)
|
||||
throw Exception("Logical error: frame address is greater than stack begin address", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
size_t stack_size = int_stack_address + max_stack_size - int_frame_address;
|
||||
|
||||
/// Just check if we have already eaten more than half of the stack size. It's a bit of an overkill (half of the stack size is wasted).
|
||||
/// It's safe to assume that overflow in multiplying by two cannot occur.
|
||||
if (stack_size * 2 > max_stack_size)
|
||||
{
|
||||
std::stringstream message;
|
||||
message << "Stack size too large"
|
||||
<< ". Stack address: " << stack_address
|
||||
<< ", frame address: " << frame_address
|
||||
<< ", stack size: " << stack_size
|
||||
<< ", maximum stack size: " << max_stack_size;
|
||||
throw Exception(message.str(), ErrorCodes::TOO_DEEP_RECURSION);
|
||||
}
|
||||
}
|
7
dbms/src/Common/checkStackSize.h
Normal file
@ -0,0 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
/** If the used stack is large and close to the maximum stack size, throw an exception.
|
||||
* You can call this function in "heavy" functions that may be called recursively
|
||||
* to prevent possible stack overflows.
|
||||
*/
|
||||
void checkStackSize();
|
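A typical caller of the new helper checks the stack at the top of each recursive call, so deep recursion fails with a TOO_DEEP_RECURSION exception instead of crashing. The recursive function and Node type below are purely hypothetical:

#include <Common/checkStackSize.h>
#include <cstddef>

struct Node
{
    const Node * left = nullptr;
    const Node * right = nullptr;
};

static size_t countNodes(const Node * node)
{
    /// Throws before the stack actually overflows (see checkStackSize.cpp above).
    checkStackSize();

    if (!node)
        return 0;
    return 1 + countNodes(node->left) + countNodes(node->right);
}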
13
dbms/src/Common/getExecutablePath.cpp
Normal file
@ -0,0 +1,13 @@
|
||||
#include <Common/getExecutablePath.h>
|
||||
#include <filesystem>
|
||||
|
||||
|
||||
std::string getExecutablePath()
|
||||
{
|
||||
std::error_code ec;
|
||||
std::filesystem::path canonical_path = std::filesystem::canonical("/proc/self/exe", ec);
|
||||
|
||||
if (ec)
|
||||
return {};
|
||||
return canonical_path;
|
||||
}
|
11
dbms/src/Common/getExecutablePath.h
Normal file
@ -0,0 +1,11 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
/** Get path to the running executable if possible.
|
||||
* It is possible when:
|
||||
* - procfs exists;
|
||||
* - there is a /proc/self/exe file;
|
||||
* Otherwise return empty string.
|
||||
*/
|
||||
std::string getExecutablePath();
|
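A minimal caller of the new helper might look like this (illustrative only):

#include <Common/getExecutablePath.h>
#include <iostream>

int main()
{
    std::string path = getExecutablePath();

    if (path.empty())
        std::cerr << "Cannot determine the executable path (no procfs?)" << std::endl;
    else
        std::cout << "Running binary: " << path << std::endl;
}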
@ -42,7 +42,7 @@ inline void writeHexByteLowercase(UInt8 byte, void * out)
|
||||
|
||||
/// Produces hex representation of an unsigned int with leading zeros (for checksums)
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table)
|
||||
{
|
||||
union
|
||||
{
|
||||
@ -50,7 +50,7 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
UInt8 uint8[sizeof(TUInt)];
|
||||
};
|
||||
|
||||
value = uint;
|
||||
value = uint_;
|
||||
|
||||
/// Use little endian
|
||||
for (size_t i = 0; i < sizeof(TUInt); ++i)
|
||||
@ -58,30 +58,30 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table)
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntUppercase(TUInt uint, char * out)
|
||||
inline void writeHexUIntUppercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint, out, hex_byte_to_char_uppercase_table);
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
inline void writeHexUIntLowercase(TUInt uint, char * out)
|
||||
inline void writeHexUIntLowercase(TUInt uint_, char * out)
|
||||
{
|
||||
writeHexUIntImpl(uint, out, hex_byte_to_char_lowercase_table);
|
||||
writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table);
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntUppercase(TUInt uint)
|
||||
std::string getHexUIntUppercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntUppercase(uint, res.data());
|
||||
writeHexUIntUppercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
template <typename TUInt>
|
||||
std::string getHexUIntLowercase(TUInt uint)
|
||||
std::string getHexUIntLowercase(TUInt uint_)
|
||||
{
|
||||
std::string res(sizeof(TUInt) * 2, '\0');
|
||||
writeHexUIntLowercase(uint, res.data());
|
||||
writeHexUIntLowercase(uint_, res.data());
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
#include <malloc.h>
|
||||
#include <new>
|
||||
|
||||
#include <common/config_common.h>
|
||||
@ -49,6 +50,11 @@ ALWAYS_INLINE void untrackMemory(void * ptr [[maybe_unused]], std::size_t size [
|
||||
#else
|
||||
if (size)
|
||||
CurrentMemoryTracker::free(size);
|
||||
#ifdef _GNU_SOURCE
|
||||
/// It's an inaccurate way to release resources for sanitizers: the malloc_usable_size() result is greater than or equal to the allocated size.
|
||||
else
|
||||
CurrentMemoryTracker::free(malloc_usable_size(ptr));
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
catch (...)
|
||||
|
@ -137,17 +137,17 @@ struct Dictionary
|
||||
|
||||
enum class AttributeUnderlyingType
|
||||
{
|
||||
UInt8,
|
||||
UInt16,
|
||||
UInt32,
|
||||
UInt64,
|
||||
Int8,
|
||||
Int16,
|
||||
Int32,
|
||||
Int64,
|
||||
Float32,
|
||||
Float64,
|
||||
String
|
||||
utUInt8,
|
||||
utUInt16,
|
||||
utUInt32,
|
||||
utUInt64,
|
||||
utInt8,
|
||||
utInt16,
|
||||
utInt32,
|
||||
utInt64,
|
||||
utFloat32,
|
||||
utFloat64,
|
||||
utString
|
||||
};
|
||||
|
||||
struct Attribute final
|
||||
@ -172,17 +172,17 @@ struct Dictionary
|
||||
{
|
||||
switch (attribute.type)
|
||||
{
|
||||
case AttributeUnderlyingType::UInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::UInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::Int8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Int64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::Float32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::Float64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::String:
|
||||
case AttributeUnderlyingType::utUInt8: std::get<ContainerPtrType<UInt8>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt16: std::get<ContainerPtrType<UInt16>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt32: std::get<ContainerPtrType<UInt32>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utUInt64: std::get<ContainerPtrType<UInt64>>(attribute.arrays)[idx] = value.get<UInt64>(); break;
|
||||
case AttributeUnderlyingType::utInt8: std::get<ContainerPtrType<Int8>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt16: std::get<ContainerPtrType<Int16>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt32: std::get<ContainerPtrType<Int32>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utInt64: std::get<ContainerPtrType<Int64>>(attribute.arrays)[idx] = value.get<Int64>(); break;
|
||||
case AttributeUnderlyingType::utFloat32: std::get<ContainerPtrType<Float32>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::utFloat64: std::get<ContainerPtrType<Float64>>(attribute.arrays)[idx] = value.get<Float64>(); break;
|
||||
case AttributeUnderlyingType::utString:
|
||||
{
|
||||
const auto & string = value.get<String>();
|
||||
auto & string_ref = std::get<ContainerPtrType<StringRef>>(attribute.arrays)[idx];
|
||||
@ -308,7 +308,7 @@ int main(int argc, char ** argv)
|
||||
constexpr size_t cache_size = 1024;
|
||||
|
||||
Dictionary::Attribute attr;
|
||||
attr.type = Dictionary::AttributeUnderlyingType::String;
|
||||
attr.type = Dictionary::AttributeUnderlyingType::utString;
|
||||
std::get<Dictionary::ContainerPtrType<StringRef>>(attr.arrays).reset(new StringRef[cache_size]{});
|
||||
|
||||
while (true)
|
||||
|
@ -28,7 +28,7 @@ private:
|
||||
friend class COWHelper<IColumn, ConcreteColumn>;
|
||||
|
||||
int data;
|
||||
ConcreteColumn(int data) : data(data) {}
|
||||
ConcreteColumn(int data_) : data(data_) {}
|
||||
ConcreteColumn(const ConcreteColumn &) = default;
|
||||
|
||||
MutableColumnPtr test() const override
|
||||
|
@ -30,7 +30,7 @@ private:
|
||||
friend class COWHelper<IColumn, ConcreteColumn>;
|
||||
|
||||
int data;
|
||||
ConcreteColumn(int data) : data(data) {}
|
||||
ConcreteColumn(int data_) : data(data_) {}
|
||||
ConcreteColumn(const ConcreteColumn &) = default;
|
||||
|
||||
public:
|
||||
|
@ -58,8 +58,8 @@ struct Allocation
|
||||
|
||||
Allocation() {}
|
||||
|
||||
Allocation(size_t size)
|
||||
: size(size)
|
||||
Allocation(size_t size_)
|
||||
: size(size_)
|
||||
{
|
||||
ptr = malloc(size);
|
||||
if (!ptr)
|
||||
|
@ -78,11 +78,12 @@ binary_value_info getLeadingAndTrailingBits(const T & value)
|
||||
const UInt8 lz = getLeadingZeroBits(value);
|
||||
const UInt8 tz = getTrailingZeroBits(value);
|
||||
const UInt8 data_size = value == 0 ? 0 : static_cast<UInt8>(bit_size - lz - tz);
|
||||
|
||||
return binary_value_info{lz, data_size, tz};
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
|
||||
UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 dest_size)
|
||||
{
|
||||
static const auto DATA_BIT_LENGTH = getBitLengthOfLength(sizeof(T));
|
||||
// -1 since there must be at least 1 non-zero bit.
|
||||
@ -91,6 +92,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
|
||||
if (source_size % sizeof(T) != 0)
|
||||
throw Exception("Cannot compress, data size " + toString(source_size) + " is not aligned to " + toString(sizeof(T)), ErrorCodes::CANNOT_COMPRESS);
|
||||
const char * source_end = source + source_size;
|
||||
const char * dest_end = dest + dest_size;
|
||||
|
||||
const UInt32 items_count = source_size / sizeof(T);
|
||||
|
||||
@ -110,7 +112,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
|
||||
dest += sizeof(prev_value);
|
||||
}
|
||||
|
||||
WriteBuffer buffer(dest, getCompressedDataSize(sizeof(T), source_size - sizeof(items_count) - sizeof(prev_value)));
|
||||
WriteBuffer buffer(dest, dest_end - dest);
|
||||
BitWriter writer(buffer);
|
||||
|
||||
while (source < source_end)
|
||||
@ -265,24 +267,26 @@ UInt32 CompressionCodecGorilla::doCompressData(const char * source, UInt32 sourc
|
||||
dest[1] = bytes_to_skip;
|
||||
memcpy(&dest[2], source, bytes_to_skip);
|
||||
size_t start_pos = 2 + bytes_to_skip;
|
||||
UInt32 compressed_size = 0;
|
||||
UInt32 result_size = 0;
|
||||
|
||||
const UInt32 compressed_size = getMaxCompressedDataSize(source_size);
|
||||
switch (data_bytes_size)
|
||||
{
|
||||
case 1:
|
||||
compressed_size = compressDataForType<UInt8>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos]);
|
||||
result_size = compressDataForType<UInt8>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos], compressed_size);
|
||||
break;
|
||||
case 2:
|
||||
compressed_size = compressDataForType<UInt16>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos]);
|
||||
result_size = compressDataForType<UInt16>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos], compressed_size);
|
||||
break;
|
||||
case 4:
|
||||
compressed_size = compressDataForType<UInt32>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos]);
|
||||
result_size = compressDataForType<UInt32>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos], compressed_size);
|
||||
break;
|
||||
case 8:
|
||||
compressed_size = compressDataForType<UInt64>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos]);
|
||||
result_size = compressDataForType<UInt64>(&source[bytes_to_skip], source_size - bytes_to_skip, &dest[start_pos], compressed_size);
|
||||
break;
|
||||
}
|
||||
|
||||
return 1 + 1 + compressed_size;
|
||||
return 1 + 1 + result_size;
|
||||
}
|
||||
|
||||
void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 /* uncompressed_size */) const
|
||||
|
@ -18,8 +18,8 @@ extern const int UNKNOWN_CODEC;
|
||||
extern const int CORRUPTED_DATA;
|
||||
}
|
||||
|
||||
CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs)
|
||||
: codecs(codecs)
|
||||
CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs_)
|
||||
: codecs(codecs_)
|
||||
{
|
||||
}
|
||||
|
||||
|
@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec
|
||||
{
|
||||
public:
|
||||
CompressionCodecMultiple() = default;
|
||||
explicit CompressionCodecMultiple(Codecs codecs);
|
||||
explicit CompressionCodecMultiple(Codecs codecs_);
|
||||
|
||||
UInt8 getMethodByte() const override;
|
||||
|
||||
|
@ -49,8 +49,8 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch
|
||||
UInt8 header_size = getHeaderSize();
|
||||
UInt32 decompressed_size = unalignedLoad<UInt32>(&source[5]);
|
||||
doDecompressData(&source[header_size], source_size - header_size, dest, decompressed_size);
|
||||
return decompressed_size;
|
||||
|
||||
return decompressed_size;
|
||||
}
|
||||
|
||||
UInt32 ICompressionCodec::readCompressedBlockSize(const char * source)
|
||||
|
@ -537,7 +537,6 @@ void decompress(
|
||||
if (source_size == 0 || dest_size == 0)
|
||||
return;
|
||||
|
||||
|
||||
/// Don't run timer if the block is too small.
|
||||
if (dest_size >= 32768)
|
||||
{
|
||||
|
@ -123,7 +123,7 @@ struct PerformanceStatistics
|
||||
}
|
||||
|
||||
PerformanceStatistics() {}
|
||||
PerformanceStatistics(ssize_t choose_method) : choose_method(choose_method) {}
|
||||
PerformanceStatistics(ssize_t choose_method_) : choose_method(choose_method_) {}
|
||||
};
|
||||
|
||||
|
||||
|
@ -1,10 +1,14 @@
|
||||
#include <Compression/CompressionCodecDoubleDelta.h>
|
||||
#include <Compression/CompressionCodecGorilla.h>
|
||||
#include <Compression/CompressionFactory.h>
|
||||
|
||||
#include <Core/Types.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadBufferFromMemory.h>
|
||||
#include <Common/PODArray.h>
|
||||
#include <Core/Types.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/IDataType.h>
|
||||
#include <IO/ReadBufferFromMemory.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Parsers/ExpressionElementParsers.h>
|
||||
#include <Parsers/IParser.h>
|
||||
#include <Parsers/TokenIterator.h>
|
||||
|
||||
#include <boost/format.hpp>
|
||||
|
||||
@ -20,10 +24,44 @@
|
||||
|
||||
#include <string.h>
|
||||
|
||||
/// For the expansion of gtest macros.
|
||||
#if defined(__clang__)
|
||||
#pragma clang diagnostic ignored "-Wdeprecated"
|
||||
#elif defined (__GNUC__)
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-copy"
|
||||
#endif
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
namespace std
|
||||
{
|
||||
template <typename T>
|
||||
std::ostream & operator<<(std::ostream & ostr, const std::optional<T> & opt)
|
||||
{
|
||||
if (!opt)
|
||||
{
|
||||
return ostr << "<empty optional>";
|
||||
}
|
||||
|
||||
return ostr << *opt;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::vector<T> operator+(std::vector<T> && left, std::vector<T> && right)
|
||||
{
|
||||
std::vector<T> result(std::move(left));
|
||||
std::move(std::begin(right), std::end(right), std::back_inserter(result));
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
template <typename T>
|
||||
std::string bin(const T & value, size_t bits = sizeof(T)*8)
|
||||
{
|
||||
@ -37,43 +75,46 @@ std::string bin(const T & value, size_t bits = sizeof(T)*8)
|
||||
template <typename T>
|
||||
const char* type_name()
|
||||
{
|
||||
#define MAKE_TYPE_NAME(TYPE) \
|
||||
if constexpr (std::is_same_v<TYPE, T>) return #TYPE
|
||||
|
||||
MAKE_TYPE_NAME(UInt8);
|
||||
MAKE_TYPE_NAME(UInt16);
|
||||
MAKE_TYPE_NAME(UInt32);
|
||||
MAKE_TYPE_NAME(UInt64);
|
||||
MAKE_TYPE_NAME(Int8);
|
||||
MAKE_TYPE_NAME(Int16);
|
||||
MAKE_TYPE_NAME(Int32);
|
||||
MAKE_TYPE_NAME(Int64);
|
||||
MAKE_TYPE_NAME(Float32);
|
||||
MAKE_TYPE_NAME(Float64);
|
||||
|
||||
#undef MAKE_TYPE_NAME
|
||||
|
||||
return typeid(T).name();
|
||||
}
|
||||
|
||||
template <>
|
||||
const char* type_name<UInt32>()
|
||||
template <typename T>
|
||||
DataTypePtr makeDataType()
|
||||
{
|
||||
return "uint32";
|
||||
}
|
||||
#define MAKE_DATA_TYPE(TYPE) \
|
||||
if constexpr (std::is_same_v<T, TYPE>) return std::make_shared<DataType ## TYPE>()
|
||||
|
||||
template <>
|
||||
const char* type_name<Int32>()
|
||||
{
|
||||
return "int32";
|
||||
}
|
||||
MAKE_DATA_TYPE(UInt8);
|
||||
MAKE_DATA_TYPE(UInt16);
|
||||
MAKE_DATA_TYPE(UInt32);
|
||||
MAKE_DATA_TYPE(UInt64);
|
||||
MAKE_DATA_TYPE(Int8);
|
||||
MAKE_DATA_TYPE(Int16);
|
||||
MAKE_DATA_TYPE(Int32);
|
||||
MAKE_DATA_TYPE(Int64);
|
||||
MAKE_DATA_TYPE(Float32);
|
||||
MAKE_DATA_TYPE(Float64);
|
||||
|
||||
template <>
|
||||
const char* type_name<UInt64>()
|
||||
{
|
||||
return "uint64";
|
||||
}
|
||||
#undef MAKE_DATA_TYPE
|
||||
|
||||
template <>
|
||||
const char* type_name<Int64>()
|
||||
{
|
||||
return "int64";
|
||||
}
|
||||
|
||||
template <>
|
||||
const char* type_name<Float32>()
|
||||
{
|
||||
return "float";
|
||||
}
|
||||
|
||||
template <>
|
||||
const char* type_name<Float64>()
|
||||
{
|
||||
return "double";
|
||||
assert(false && "unsupported size");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
@ -135,52 +176,100 @@ template <typename T, typename ContainerLeft, typename ContainerRight>
|
||||
return result;
|
||||
}
|
||||
|
||||
struct CodecTestParam
|
||||
struct Codec
|
||||
{
|
||||
std::string type_name;
|
||||
std::vector<char> source_data;
|
||||
UInt8 data_byte_size;
|
||||
double min_compression_ratio;
|
||||
std::string case_name;
|
||||
std::string codec_statement;
|
||||
std::optional<double> expected_compression_ratio;
|
||||
|
||||
// to allow setting ratio after building with complex builder functions.
|
||||
CodecTestParam && setRatio(const double & ratio) &&
|
||||
{
|
||||
this->min_compression_ratio = ratio;
|
||||
return std::move(*this);
|
||||
}
|
||||
explicit Codec(std::string codec_statement_, std::optional<double> expected_compression_ratio_ = std::nullopt)
|
||||
: codec_statement(std::move(codec_statement_)),
|
||||
expected_compression_ratio(expected_compression_ratio_)
|
||||
{}
|
||||
|
||||
Codec()
|
||||
: Codec(std::string())
|
||||
{}
|
||||
};
|
||||
|
||||
CodecTestParam operator+(CodecTestParam && left, CodecTestParam && right)
|
||||
|
||||
struct CodecTestSequence
|
||||
{
|
||||
assert(left.type_name == right.type_name);
|
||||
assert(left.data_byte_size == right.data_byte_size);
|
||||
std::string name;
|
||||
std::vector<char> serialized_data;
|
||||
DataTypePtr data_type;
|
||||
|
||||
std::vector data(std::move(left.source_data));
|
||||
data.insert(data.end(), right.source_data.begin(), right.source_data.end());
|
||||
CodecTestSequence()
|
||||
: name(),
|
||||
serialized_data(),
|
||||
data_type()
|
||||
{}
|
||||
|
||||
return CodecTestParam{
|
||||
left.type_name,
|
||||
std::move(data),
|
||||
left.data_byte_size,
|
||||
std::min(left.min_compression_ratio, right.min_compression_ratio),
|
||||
left.case_name + " + " + right.case_name
|
||||
CodecTestSequence(std::string name_, std::vector<char> serialized_data_, DataTypePtr data_type_)
|
||||
: name(name_),
|
||||
serialized_data(serialized_data_),
|
||||
data_type(data_type_)
|
||||
{}
|
||||
|
||||
CodecTestSequence(const CodecTestSequence &) = default;
|
||||
CodecTestSequence & operator=(const CodecTestSequence &) = default;
|
||||
CodecTestSequence(CodecTestSequence &&) = default;
|
||||
CodecTestSequence & operator=(CodecTestSequence &&) = default;
|
||||
};
|
||||
|
||||
CodecTestSequence operator+(CodecTestSequence && left, CodecTestSequence && right)
|
||||
{
|
||||
assert(left.data_type->equals(*right.data_type));
|
||||
|
||||
std::vector<char> data(std::move(left.serialized_data));
|
||||
data.insert(data.end(), right.serialized_data.begin(), right.serialized_data.end());
|
||||
|
||||
return CodecTestSequence{
|
||||
left.name + " + " + right.name,
|
||||
std::move(data),
|
||||
std::move(left.data_type)
|
||||
};
|
||||
}
|
||||
|
||||
std::ostream & operator<<(std::ostream & ostr, const CodecTestParam & param)
|
||||
template <typename T>
|
||||
CodecTestSequence operator*(CodecTestSequence && left, T times)
|
||||
{
|
||||
return ostr << "name: " << param.case_name
|
||||
<< "\ntype name:" << param.type_name
|
||||
<< "\nbyte size: " << static_cast<UInt32>(param.data_byte_size)
|
||||
<< "\ndata size: " << param.source_data.size();
|
||||
std::vector<char> data(std::move(left.serialized_data));
|
||||
const size_t initial_size = data.size();
|
||||
const size_t final_size = initial_size * times;
|
||||
|
||||
data.reserve(final_size);
|
||||
|
||||
for (T i = 0; i < times; ++i)
|
||||
{
|
||||
data.insert(data.end(), data.begin(), data.begin() + initial_size);
|
||||
}
|
||||
|
||||
return CodecTestSequence{
|
||||
left.name + " x " + std::to_string(times),
|
||||
std::move(data),
|
||||
std::move(left.data_type)
|
||||
};
|
||||
}
|
||||
|
||||
// compression ratio < 1.0 means that codec output is smaller than input.
|
||||
const double DEFAULT_MIN_COMPRESSION_RATIO = 1.0;
|
||||
std::ostream & operator<<(std::ostream & ostr, const Codec & codec)
|
||||
{
|
||||
return ostr << "Codec{"
|
||||
<< "name: " << codec.codec_statement
|
||||
<< ", expected_compression_ratio: " << codec.expected_compression_ratio
|
||||
<< "}";
|
||||
}
|
||||
|
||||
std::ostream & operator<<(std::ostream & ostr, const CodecTestSequence & seq)
|
||||
{
|
||||
return ostr << "CodecTestSequence{"
|
||||
<< "name: " << seq.name
|
||||
<< ", type name: " << seq.data_type->getName()
|
||||
<< ", data size: " << seq.serialized_data.size() << " bytes"
|
||||
<< "}";
|
||||
}
|
||||
|
||||
template <typename T, typename... Args>
|
||||
CodecTestParam makeParam(Args && ... args)
|
||||
CodecTestSequence makeSeq(Args && ... args)
|
||||
{
|
||||
std::initializer_list<T> vals{static_cast<T>(args)...};
|
||||
std::vector<char> data(sizeof(T) * std::size(vals));
|
||||
@ -192,14 +281,17 @@ CodecTestParam makeParam(Args && ... args)
|
||||
write_pos += sizeof(v);
|
||||
}
|
||||
|
||||
return CodecTestParam{type_name<T>(), std::move(data), sizeof(T), DEFAULT_MIN_COMPRESSION_RATIO,
|
||||
(boost::format("%1% values of %2%") % std::size(vals) % type_name<T>()).str()};
|
||||
return CodecTestSequence{
|
||||
(boost::format("%1% values of %2%") % std::size(vals) % type_name<T>()).str(),
|
||||
std::move(data),
|
||||
makeDataType<T>()
|
||||
};
|
||||
}
|
||||
|
||||
template <typename T, size_t Begin = 1, size_t End = 10001, typename Generator>
|
||||
CodecTestParam generateParam(Generator gen, const char* gen_name)
|
||||
template <typename T, typename Generator>
|
||||
CodecTestSequence generateSeq(Generator gen, const char* gen_name, size_t Begin = 0, size_t End = 10000)
|
||||
{
|
||||
static_assert (End >= Begin, "End must be not less than Begin");
|
||||
assert (End >= Begin);
|
||||
|
||||
std::vector<char> data(sizeof(T) * (End - Begin));
|
||||
char * write_pos = data.data();
|
||||
@ -211,89 +303,104 @@ CodecTestParam generateParam(Generator gen, const char* gen_name)
|
||||
write_pos += sizeof(v);
|
||||
}
|
||||
|
||||
return CodecTestParam{type_name<T>(), std::move(data), sizeof(T), DEFAULT_MIN_COMPRESSION_RATIO,
|
||||
(boost::format("%1% values of %2% from %3%") % (End - Begin) % type_name<T>() % gen_name).str()};
|
||||
return CodecTestSequence{
|
||||
(boost::format("%1% values of %2% from %3%") % (End - Begin) % type_name<T>() % gen_name).str(),
|
||||
std::move(data),
|
||||
makeDataType<T>()
|
||||
};
|
||||
}
|
||||
|
||||
void TestTranscoding(ICompressionCodec * codec, const CodecTestParam & param)
|
||||
{
|
||||
const auto & source_data = param.source_data;
|
||||
|
||||
const UInt32 encoded_max_size = codec->getCompressedReserveSize(source_data.size());
|
||||
PODArray<char> encoded(encoded_max_size);
|
||||
|
||||
const UInt32 encoded_size = codec->compress(source_data.data(), source_data.size(), encoded.data());
|
||||
encoded.resize(encoded_size);
|
||||
|
||||
PODArray<char> decoded(source_data.size());
|
||||
const UInt32 decoded_size = codec->decompress(encoded.data(), encoded.size(), decoded.data());
|
||||
decoded.resize(decoded_size);
|
||||
|
||||
switch (param.data_byte_size)
|
||||
{
|
||||
case 1:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt8>(source_data, decoded));
|
||||
break;
|
||||
case 2:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt16>(source_data, decoded));
|
||||
break;
|
||||
case 4:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt32>(source_data, decoded));
|
||||
break;
|
||||
case 8:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt64>(source_data, decoded));
|
||||
break;
|
||||
default:
|
||||
FAIL() << "Invalid data_byte_size: " << param.data_byte_size;
|
||||
}
|
||||
const auto header_size = codec->getHeaderSize();
|
||||
const auto compression_ratio = (encoded_size - header_size) / (source_data.size() * 1.0);
|
||||
|
||||
ASSERT_LE(compression_ratio, param.min_compression_ratio)
|
||||
<< "\n\tdecoded size: " << source_data.size()
|
||||
<< "\n\tencoded size: " << encoded_size
|
||||
<< "(no header: " << encoded_size - header_size << ")";
|
||||
}
|
||||
|
||||
class CodecTest : public ::testing::TestWithParam<CodecTestParam>
|
||||
class CodecTest : public ::testing::TestWithParam<std::tuple<Codec, CodecTestSequence>>
|
||||
{
|
||||
public:
|
||||
static void SetUpTestCase()
|
||||
enum MakeCodecParam
|
||||
{
|
||||
// To make randomness predictable and avoid the test failing "out of the blue".
|
||||
srand(0);
|
||||
CODEC_WITH_DATA_TYPE,
|
||||
CODEC_WITHOUT_DATA_TYPE,
|
||||
};
|
||||
|
||||
CompressionCodecPtr makeCodec(MakeCodecParam with_data_type) const
|
||||
{
|
||||
const auto & codec_string = std::get<0>(GetParam()).codec_statement;
|
||||
const auto & data_type = with_data_type == CODEC_WITH_DATA_TYPE ? std::get<1>(GetParam()).data_type : nullptr;
|
||||
|
||||
const std::string codec_statement = "(" + codec_string + ")";
|
||||
Tokens tokens(codec_statement.begin().base(), codec_statement.end().base());
|
||||
IParser::Pos token_iterator(tokens);
|
||||
|
||||
Expected expected;
|
||||
ASTPtr codec_ast;
|
||||
ParserCodec parser;
|
||||
|
||||
parser.parse(token_iterator, codec_ast, expected);
|
||||
|
||||
return CompressionCodecFactory::instance().get(codec_ast, data_type);
|
||||
}
|
||||
|
||||
void testTranscoding(ICompressionCodec & codec)
|
||||
{
|
||||
const auto & test_sequence = std::get<1>(GetParam());
|
||||
const auto & source_data = test_sequence.serialized_data;
|
||||
|
||||
const UInt32 encoded_max_size = codec.getCompressedReserveSize(source_data.size());
|
||||
PODArray<char> encoded(encoded_max_size);
|
||||
|
||||
const UInt32 encoded_size = codec.compress(source_data.data(), source_data.size(), encoded.data());
|
||||
encoded.resize(encoded_size);
|
||||
|
||||
PODArray<char> decoded(source_data.size());
|
||||
const UInt32 decoded_size = codec.decompress(encoded.data(), encoded.size(), decoded.data());
|
||||
decoded.resize(decoded_size);
|
||||
|
||||
switch (test_sequence.data_type->getSizeOfValueInMemory())
|
||||
{
|
||||
case 1:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt8>(source_data, decoded));
|
||||
break;
|
||||
case 2:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt16>(source_data, decoded));
|
||||
break;
|
||||
case 4:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt32>(source_data, decoded));
|
||||
break;
|
||||
case 8:
|
||||
ASSERT_TRUE(EqualByteContainersAs<UInt64>(source_data, decoded));
|
||||
break;
|
||||
default:
|
||||
FAIL() << "Invalid test sequence data type: " << test_sequence.data_type->getName();
|
||||
}
|
||||
const auto header_size = codec.getHeaderSize();
|
||||
const auto compression_ratio = (encoded_size - header_size) / (source_data.size() * 1.0);
|
||||
|
||||
const auto & codec_spec = std::get<0>(GetParam());
|
||||
if (codec_spec.expected_compression_ratio)
|
||||
{
|
||||
ASSERT_LE(compression_ratio, *codec_spec.expected_compression_ratio)
|
||||
<< "\n\tdecoded size: " << source_data.size()
|
||||
<< "\n\tencoded size: " << encoded_size
|
||||
<< "(no header: " << encoded_size - header_size << ")";
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(CodecTest, DoubleDelta)
|
||||
TEST_P(CodecTest, TranscodingWithDataType)
|
||||
{
|
||||
auto param = GetParam();
|
||||
auto codec = std::make_unique<CompressionCodecDoubleDelta>(param.data_byte_size);
|
||||
if (param.type_name == type_name<Float32>() || param.type_name == type_name<Float64>())
|
||||
{
|
||||
// dd doesn't work great with many cases of integers and may result in very poor compression rate.
|
||||
param.min_compression_ratio *= 1.5;
|
||||
}
|
||||
|
||||
TestTranscoding(codec.get(), param);
|
||||
const auto codec = makeCodec(CODEC_WITH_DATA_TYPE);
|
||||
testTranscoding(*codec);
|
||||
}
|
||||
|
||||
TEST_P(CodecTest, Gorilla)
|
||||
TEST_P(CodecTest, TranscodingWithoutDataType)
|
||||
{
|
||||
auto param = GetParam();
|
||||
auto codec = std::make_unique<CompressionCodecGorilla>(param.data_byte_size);
|
||||
if (param.type_name == type_name<UInt32>() || param.type_name == type_name<Int32>()
|
||||
|| param.type_name == type_name<UInt64>() || param.type_name == type_name<Int64>())
|
||||
{
|
||||
// gorilla doesn't work great with many cases of integers and may result in very poor compression rate.
|
||||
param.min_compression_ratio *= 1.5;
|
||||
}
|
||||
|
||||
TestTranscoding(codec.get(), param);
|
||||
const auto codec = makeCodec(CODEC_WITHOUT_DATA_TYPE);
|
||||
testTranscoding(*codec);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Here we use generators to produce test payload for codecs.
|
||||
// Generator is a callable that should produce output value of the same type as input value.
|
||||
// Generator is a callable that can produce an infinite number of values,
// output value MUST be of the same type as the input value.
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
auto SameValueGenerator = [](auto value)
|
||||
{
|
||||
@ -332,141 +439,427 @@ auto SequentialGenerator = [](auto stride = 1)
|
||||
//};
|
||||
|
||||
template <typename T>
|
||||
using uniform_distribution =
|
||||
typename std::conditional_t<std::is_floating_point_v<T>, std::uniform_real_distribution<T>,
|
||||
typename std::conditional_t<std::is_integral_v<T>, std::uniform_int_distribution<T>, void>>;
|
||||
|
||||
|
||||
template <typename T = Int32>
|
||||
struct MonotonicGenerator
|
||||
{
|
||||
MonotonicGenerator(T stride = 1, size_t max_step = 10)
|
||||
MonotonicGenerator(T stride_ = 1, T max_step = 10)
|
||||
: prev_value(0),
|
||||
stride(stride),
|
||||
max_step(max_step)
|
||||
stride(stride_),
|
||||
random_engine(0),
|
||||
distribution(0, max_step)
|
||||
{}
|
||||
|
||||
template <typename U>
|
||||
U operator()(U)
|
||||
{
|
||||
const U result = prev_value + static_cast<T>(stride * (rand() % max_step));
|
||||
|
||||
prev_value = result;
|
||||
return result;
|
||||
prev_value = prev_value + stride * distribution(random_engine);
|
||||
return static_cast<U>(prev_value);
|
||||
}
|
||||
|
||||
private:
|
||||
T prev_value;
|
||||
const T stride;
|
||||
const size_t max_step;
|
||||
};
|
||||
|
||||
auto MinMaxGenerator = [](auto i)
|
||||
{
|
||||
if (i % 2 == 0)
|
||||
{
|
||||
return std::numeric_limits<decltype(i)>::min();
|
||||
}
|
||||
else
|
||||
{
|
||||
return std::numeric_limits<decltype(i)>::max();
|
||||
}
|
||||
std::default_random_engine random_engine;
|
||||
uniform_distribution<T> distribution;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
struct RandomGenerator
|
||||
{
|
||||
RandomGenerator(T seed = 0, T value_cap = std::numeric_limits<T>::max())
|
||||
: e(seed),
|
||||
value_cap(value_cap)
|
||||
RandomGenerator(T seed = 0, T value_min = std::numeric_limits<T>::min(), T value_max = std::numeric_limits<T>::max())
|
||||
: random_engine(seed),
|
||||
distribution(value_min, value_max)
|
||||
{
|
||||
}
|
||||
|
||||
template <typename U>
|
||||
U operator()(U i)
|
||||
U operator()(U)
|
||||
{
|
||||
return static_cast<decltype(i)>(distribution(e) % value_cap);
|
||||
return static_cast<U>(distribution(random_engine));
|
||||
}
|
||||
|
||||
private:
|
||||
std::default_random_engine e;
|
||||
std::uniform_int_distribution<T> distribution;
|
||||
const T value_cap;
|
||||
std::default_random_engine random_engine;
|
||||
uniform_distribution<T> distribution;
|
||||
};
|
||||
|
||||
auto RandomishGenerator = [](auto i)
|
||||
{
|
||||
return static_cast<decltype(i)>(sin(static_cast<double>(i) * i) * i);
|
||||
return static_cast<decltype(i)>(sin(static_cast<double>(i * i)) * i);
|
||||
};
|
||||
|
||||
// helper macro to produce human-friendly test case name
auto MinMaxGenerator = []()
{
    return [step = 0](auto i) mutable
    {
        if (step++ % 2 == 0)
        {
            return std::numeric_limits<decltype(i)>::min();
        }
        else
        {
            return std::numeric_limits<decltype(i)>::max();
        }
    };
};

// Fill dest value with 0x00 or 0xFF
auto FFand0Generator = []()
{
    return [step = 0](auto i) mutable
    {
        decltype(i) result;
        if (step++ % 2 == 0)
        {
            memset(&result, 0, sizeof(result));
        }
        else
        {
            memset(&result, 0xFF, sizeof(result));
        }

        return result;
    };
};
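// Illustrative note (assumption, not from this patch): FFand0Generator alternates between a value
// whose bytes are all 0x00 and one whose bytes are all 0xFF. For unsigned integers that is 0 and
// the maximum value; for IEEE-754 floats the all-0xFF pattern is a negative quiet NaN, so the
// OverflowFloat case further below also exercises Gorilla on non-finite bit patterns.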
// Makes many sequences with the generator; the first sequence has length 1, the second 2, ... up to `sequences_count`.
template <typename T, typename Generator>
std::vector<CodecTestSequence> generatePyramidOfSequences(const size_t sequences_count, Generator && generator, const char* generator_name)
{
    std::vector<CodecTestSequence> sequences;
    sequences.reserve(sequences_count);
    for (size_t i = 1; i < sequences_count; ++i)
    {
        std::string name = generator_name + std::string(" from 0 to ") + std::to_string(i);
        sequences.push_back(generateSeq<T>(std::forward<decltype(generator)>(generator), name.c_str(), 0, i));
    }

    return sequences;
};
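// Minimal usage sketch (editorial assumption, based on how the SmallSequences case below combines
// results; it presumes an operator+ that concatenates std::vector<CodecTestSequence>, defined
// elsewhere in this file):
//
//     const auto pyramid = generatePyramidOfSequences<Int8>(42, G(SequentialGenerator(1)))
//                        + generatePyramidOfSequences<UInt64>(42, G(SequentialGenerator(1)));
//     // pyramid now holds sequences of lengths 1 through 41 for each of the two types.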
// helper macro to produce human-friendly sequence name from generator
#define G(generator) generator, #generator
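// Illustrative note (editorial, not part of this patch): G pairs a generator with its stringized
// spelling, e.g. G(SequentialGenerator(1)) expands to
//
//     SequentialGenerator(1), "SequentialGenerator(1)"
//
// which is how generateSeq and generatePyramidOfSequences receive both the generator and a
// human-readable name for the resulting sequence.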
const auto DefaultCodecsToTest = ::testing::Values(
    Codec("DoubleDelta"),
    Codec("DoubleDelta, LZ4"),
    Codec("DoubleDelta, ZSTD"),
    Codec("Gorilla"),
    Codec("Gorilla, LZ4"),
    Codec("Gorilla, ZSTD")
);
///////////////////////////////////////////////////////////////////////////////////////////////////
// test cases
///////////////////////////////////////////////////////////////////////////////////////////////////

INSTANTIATE_TEST_CASE_P(Simple,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            makeSeq<Float64>(1, 2, 3, 5, 7, 11, 13, 17, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97)
        )
    ),
);
INSTANTIATE_TEST_CASE_P(SmallSequences,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::ValuesIn(
              generatePyramidOfSequences<Int8 >(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<Int16 >(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<Int32 >(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<Int64 >(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<UInt8 >(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<UInt16>(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<UInt32>(42, G(SequentialGenerator(1)))
            + generatePyramidOfSequences<UInt64>(42, G(SequentialGenerator(1)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Mixed,
    CodecTest,
    ::testing::Values(
        generateParam<Int32, 1, 3>(G(MinMaxGenerator)) + generateParam<Int32, 1, 11>(G(SequentialGenerator(1))).setRatio(1),
        generateParam<UInt32, 1, 3>(G(MinMaxGenerator)) + generateParam<UInt32, 1, 11>(G(SequentialGenerator(1))).setRatio(1),
        generateParam<Int64, 1, 3>(G(MinMaxGenerator)) + generateParam<Int64, 1, 11>(G(SequentialGenerator(1))).setRatio(1),
        generateParam<UInt64, 1, 3>(G(MinMaxGenerator)) + generateParam<UInt64, 1, 11>(G(SequentialGenerator(1))).setRatio(1)
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8>(G(MinMaxGenerator()), 1, 5) + generateSeq<Int8>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<Int16>(G(MinMaxGenerator()), 1, 5) + generateSeq<Int16>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<Int32>(G(MinMaxGenerator()), 1, 5) + generateSeq<Int32>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<Int64>(G(MinMaxGenerator()), 1, 5) + generateSeq<Int64>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<UInt8>(G(MinMaxGenerator()), 1, 5) + generateSeq<UInt8>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<UInt16>(G(MinMaxGenerator()), 1, 5) + generateSeq<UInt16>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<UInt32>(G(MinMaxGenerator()), 1, 5) + generateSeq<UInt32>(G(SequentialGenerator(1)), 1, 1001),
            generateSeq<UInt64>(G(MinMaxGenerator()), 1, 5) + generateSeq<UInt64>(G(SequentialGenerator(1)), 1, 1001)
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Same,
INSTANTIATE_TEST_CASE_P(SameValueInt,
    CodecTest,
    ::testing::Values(
        generateParam<UInt32>(G(SameValueGenerator(1000))),
        generateParam<Int32>(G(SameValueGenerator(-1000))),
        generateParam<UInt64>(G(SameValueGenerator(1000))),
        generateParam<Int64>(G(SameValueGenerator(-1000))),
        generateParam<Float32>(G(SameValueGenerator(M_E))),
        generateParam<Float64>(G(SameValueGenerator(M_E)))
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(SameValueGenerator(1000))),
            generateSeq<Int16 >(G(SameValueGenerator(1000))),
            generateSeq<Int32 >(G(SameValueGenerator(1000))),
            generateSeq<Int64 >(G(SameValueGenerator(1000))),
            generateSeq<UInt8 >(G(SameValueGenerator(1000))),
            generateSeq<UInt16>(G(SameValueGenerator(1000))),
            generateSeq<UInt32>(G(SameValueGenerator(1000))),
            generateSeq<UInt64>(G(SameValueGenerator(1000)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Sequential,
INSTANTIATE_TEST_CASE_P(SameNegativeValueInt,
    CodecTest,
    ::testing::Values(
        generateParam<UInt32>(G(SequentialGenerator(1))),
        generateParam<Int32>(G(SequentialGenerator(-1))),
        generateParam<UInt64>(G(SequentialGenerator(1))),
        generateParam<Int64>(G(SequentialGenerator(-1))),
        generateParam<Float32>(G(SequentialGenerator(M_E))),
        generateParam<Float64>(G(SequentialGenerator(M_E)))
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(SameValueGenerator(-1000))),
            generateSeq<Int16 >(G(SameValueGenerator(-1000))),
            generateSeq<Int32 >(G(SameValueGenerator(-1000))),
            generateSeq<Int64 >(G(SameValueGenerator(-1000))),
            generateSeq<UInt8 >(G(SameValueGenerator(-1000))),
            generateSeq<UInt16>(G(SameValueGenerator(-1000))),
            generateSeq<UInt32>(G(SameValueGenerator(-1000))),
            generateSeq<UInt64>(G(SameValueGenerator(-1000)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Monotonic,
INSTANTIATE_TEST_CASE_P(SameValueFloat,
    CodecTest,
    ::testing::Values(
        generateParam<UInt32>(G(MonotonicGenerator<UInt32>(1, 5))),
        generateParam<Int32>(G(MonotonicGenerator<Int32>(-1, 5))),
        generateParam<UInt64>(G(MonotonicGenerator<UInt64>(1, 5))),
        generateParam<Int64>(G(MonotonicGenerator<Int64>(-1, 5))),
        generateParam<Float32>(G(MonotonicGenerator<Float32>(M_E, 5))),
        generateParam<Float64>(G(MonotonicGenerator<Float64>(M_E, 5)))
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla"),
            Codec("Gorilla, LZ4")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(SameValueGenerator(M_E))),
            generateSeq<Float64>(G(SameValueGenerator(M_E)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Random,
INSTANTIATE_TEST_CASE_P(SameNegativeValueFloat,
    CodecTest,
    ::testing::Values(
        generateParam<UInt32>(G(RandomGenerator<UInt32>(0, 1000'000'000))).setRatio(1.2),
        generateParam<UInt64>(G(RandomGenerator<UInt64>(0, 1000'000'000))).setRatio(1.1)
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla"),
            Codec("Gorilla, LZ4")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(SameValueGenerator(-1 * M_E))),
            generateSeq<Float64>(G(SameValueGenerator(-1 * M_E)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Randomish,
INSTANTIATE_TEST_CASE_P(SequentialInt,
    CodecTest,
    ::testing::Values(
        generateParam<Int32>(G(RandomishGenerator)).setRatio(1.1),
        generateParam<Int64>(G(RandomishGenerator)).setRatio(1.1),
        generateParam<UInt32>(G(RandomishGenerator)).setRatio(1.1),
        generateParam<UInt64>(G(RandomishGenerator)).setRatio(1.1),
        generateParam<Float32>(G(RandomishGenerator)).setRatio(1.1),
        generateParam<Float64>(G(RandomishGenerator)).setRatio(1.1)
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(SequentialGenerator(1))),
            generateSeq<Int16 >(G(SequentialGenerator(1))),
            generateSeq<Int32 >(G(SequentialGenerator(1))),
            generateSeq<Int64 >(G(SequentialGenerator(1))),
            generateSeq<UInt8 >(G(SequentialGenerator(1))),
            generateSeq<UInt16>(G(SequentialGenerator(1))),
            generateSeq<UInt32>(G(SequentialGenerator(1))),
            generateSeq<UInt64>(G(SequentialGenerator(1)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(Overflow,
// -1, -2, -3, ... etc for signed
// 0xFF, 0xFE, 0xFD, ... for unsigned
INSTANTIATE_TEST_CASE_P(SequentialReverseInt,
    CodecTest,
    ::testing::Values(
        generateParam<UInt32>(G(MinMaxGenerator)),
        generateParam<Int32>(G(MinMaxGenerator)),
        generateParam<UInt64>(G(MinMaxGenerator)),
        generateParam<Int64>(G(MinMaxGenerator))
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(SequentialGenerator(-1))),
            generateSeq<Int16 >(G(SequentialGenerator(-1))),
            generateSeq<Int32 >(G(SequentialGenerator(-1))),
            generateSeq<Int64 >(G(SequentialGenerator(-1))),
            generateSeq<UInt8 >(G(SequentialGenerator(-1))),
            generateSeq<UInt16>(G(SequentialGenerator(-1))),
            generateSeq<UInt32>(G(SequentialGenerator(-1))),
            generateSeq<UInt64>(G(SequentialGenerator(-1)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(SequentialFloat,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla"),
            Codec("Gorilla, LZ4")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(SequentialGenerator(M_E))),
            generateSeq<Float64>(G(SequentialGenerator(M_E)))
        )
    ),
);

INSTANTIATE_TEST_CASE_P(SequentialReverseFloat,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla"),
            Codec("Gorilla, LZ4")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(SequentialGenerator(-1 * M_E))),
            generateSeq<Float64>(G(SequentialGenerator(-1 * M_E)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(MonotonicInt,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(MonotonicGenerator(1, 5))),
            generateSeq<Int16 >(G(MonotonicGenerator(1, 5))),
            generateSeq<Int32 >(G(MonotonicGenerator(1, 5))),
            generateSeq<Int64 >(G(MonotonicGenerator(1, 5))),
            generateSeq<UInt8 >(G(MonotonicGenerator(1, 5))),
            generateSeq<UInt16>(G(MonotonicGenerator(1, 5))),
            generateSeq<UInt32>(G(MonotonicGenerator(1, 5))),
            generateSeq<UInt64>(G(MonotonicGenerator(1, 5)))
        )
    ),
);

INSTANTIATE_TEST_CASE_P(MonotonicReverseInt,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int8 >(G(MonotonicGenerator(-1, 5))),
            generateSeq<Int16 >(G(MonotonicGenerator(-1, 5))),
            generateSeq<Int32 >(G(MonotonicGenerator(-1, 5))),
            generateSeq<Int64 >(G(MonotonicGenerator(-1, 5))),
            generateSeq<UInt8 >(G(MonotonicGenerator(-1, 5))),
            generateSeq<UInt16>(G(MonotonicGenerator(-1, 5))),
            generateSeq<UInt32>(G(MonotonicGenerator(-1, 5))),
            generateSeq<UInt64>(G(MonotonicGenerator(-1, 5)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(MonotonicFloat,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(MonotonicGenerator<Float32>(M_E, 5))),
            generateSeq<Float64>(G(MonotonicGenerator<Float64>(M_E, 5)))
        )
    ),
);

INSTANTIATE_TEST_CASE_P(MonotonicReverseFloat,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla")
        ),
        ::testing::Values(
            generateSeq<Float32>(G(MonotonicGenerator<Float32>(-1 * M_E, 5))),
            generateSeq<Float64>(G(MonotonicGenerator<Float64>(-1 * M_E, 5)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(RandomInt,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<UInt8 >(G(RandomGenerator<UInt8>(0))),
            generateSeq<UInt16>(G(RandomGenerator<UInt16>(0))),
            generateSeq<UInt32>(G(RandomGenerator<UInt32>(0, 0, 1000'000'000))),
            generateSeq<UInt64>(G(RandomGenerator<UInt64>(0, 0, 1000'000'000)))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(RandomishInt,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Int32>(G(RandomishGenerator)),
            generateSeq<Int64>(G(RandomishGenerator)),
            generateSeq<UInt32>(G(RandomishGenerator)),
            generateSeq<UInt64>(G(RandomishGenerator)),
            generateSeq<Float32>(G(RandomishGenerator)),
            generateSeq<Float64>(G(RandomishGenerator))
        )
    ),
);

INSTANTIATE_TEST_CASE_P(RandomishFloat,
    CodecTest,
    ::testing::Combine(
        DefaultCodecsToTest,
        ::testing::Values(
            generateSeq<Float32>(G(RandomishGenerator)),
            generateSeq<Float64>(G(RandomishGenerator))
        )
    ),
);
// Double delta overflow case, deltas are out of bounds for target type
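// Illustrative note (assumption, not from this patch): MinMaxGenerator alternates
// numeric_limits::min() and ::max(), so a single step spans nearly the full range of the type;
// neither the delta nor the delta-of-deltas fits in the signed target type, which is the overflow
// path this case is meant to pin down.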
INSTANTIATE_TEST_CASE_P(OverflowInt,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("DoubleDelta", 1.2),
            Codec("DoubleDelta, LZ4", 1.0)
        ),
        ::testing::Values(
            generateSeq<UInt32>(G(MinMaxGenerator())),
            generateSeq<Int32>(G(MinMaxGenerator())),
            generateSeq<UInt64>(G(MinMaxGenerator())),
            generateSeq<Int64>(G(MinMaxGenerator()))
        )
    ),
);
INSTANTIATE_TEST_CASE_P(OverflowFloat,
    CodecTest,
    ::testing::Combine(
        ::testing::Values(
            Codec("Gorilla", 1.1),
            Codec("Gorilla, LZ4", 1.0)
        ),
        ::testing::Values(
            generateSeq<Float32>(G(MinMaxGenerator())),
            generateSeq<Float64>(G(MinMaxGenerator())),
            generateSeq<Float32>(G(FFand0Generator())),
            generateSeq<Float64>(G(FFand0Generator()))
        )
    ),
);

}
@ -23,7 +23,7 @@ namespace DB
class TaskNotification final : public Poco::Notification
{
public:
    explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task) : task(task) {}
    explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task_) : task(task_) {}
    void execute() { task->execute(); }

private:
@ -155,8 +155,8 @@ Coordination::WatchCallback BackgroundSchedulePoolTaskInfo::getWatchCallback()
}


BackgroundSchedulePool::BackgroundSchedulePool(size_t size)
    : size(size)
BackgroundSchedulePool::BackgroundSchedulePool(size_t size_)
    : size(size_)
{
    LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads");