diff --git a/CHANGELOG.md b/CHANGELOG.md index f9c461e63e4..2a0b69bcc6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -185,7 +185,7 @@ * Fixed "select_format" performance test for `Pretty` formats [#5642](https://github.com/yandex/ClickHouse/pull/5642) ([alexey-milovidov](https://github.com/alexey-milovidov)) -## ClickHouse release 19.9.4.1, 2019-07-05 +## ClickHouse release 19.9.3.31, 2019-07-05 ### Bug Fix * Fix segfault in Delta codec which affects columns with values less than 32 bits size. The bug led to random memory corruption. [#5786](https://github.com/yandex/ClickHouse/pull/5786) ([alesapin](https://github.com/alesapin)) @@ -197,7 +197,7 @@ * Fix race condition, which cause that some queries may not appear in query_log instantly after SYSTEM FLUSH LOGS query. [#5685](https://github.com/yandex/ClickHouse/pull/5685) ([Anton Popov](https://github.com/CurtizJ)) * Added missing support for constant arguments to `evalMLModel` function. [#5820](https://github.com/yandex/ClickHouse/pull/5820) ([alexey-milovidov](https://github.com/alexey-milovidov)) -## ClickHouse release 19.7.6.1, 2019-07-05 +## ClickHouse release 19.7.5.29, 2019-07-05 ### Bug Fix * Fix performance regression in some queries with JOIN. 
[#5192](https://github.com/yandex/ClickHouse/pull/5192) ([Winter Zhang](https://github.com/zhang2014)) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e03af27cfa..d369dca7e78 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -437,10 +437,10 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE include(GNUInstallDirs) include (cmake/find_contrib_lib.cmake) +include (cmake/lib_name.cmake) find_contrib_lib(double-conversion) # Must be before parquet include (cmake/find_ssl.cmake) -include (cmake/lib_name.cmake) include (cmake/find_icu.cmake) include (cmake/find_boost.cmake) include (cmake/find_zlib.cmake) diff --git a/cmake/find_fastops.cmake b/cmake/find_fastops.cmake index c8ddbaf80a7..08a977c240b 100644 --- a/cmake/find_fastops.cmake +++ b/cmake/find_fastops.cmake @@ -1,4 +1,6 @@ -option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Michael Parakhin" ${NOT_UNBUNDLED}) +if (NOT ARCH_ARM AND NOT OS_FREEBSD) + option (ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${NOT_UNBUNDLED}) +endif () if (ENABLE_FASTOPS) if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fastops/fastops/fastops.h") @@ -12,4 +14,4 @@ else () set(USE_FASTOPS 0) endif () -message (STATUS "Using fastops") +message (STATUS "Using fastops=${USE_FASTOPS}: ${FASTOPS_INCLUDE_DIR} : ${FASTOPS_LIBRARY}") diff --git a/contrib/poco b/contrib/poco index ea2516be366..7a2d304c215 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit ea2516be366a73a02a82b499ed4a7db1d40037e0 +Subproject commit 7a2d304c21549427460428c9039009ef4bbfd899 diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt index a9e276852c1..b589c398238 100644 --- a/dbms/CMakeLists.txt +++ b/dbms/CMakeLists.txt @@ -48,7 +48,7 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow -Wshadow-uncaptured-local -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override 
-Wunused-exception-parameter -Wcovered-switch-default -Wold-style-cast -Wrange-loop-analysis -Wunused-member-function -Wunreachable-code -Wunreachable-code-return -Wnewline-eof -Wembedded-directive -Wgnu-case-range -Wunused-macros -Wconditional-uninitialized -Wdeprecated -Wundef -Wreserved-id-macro -Wredundant-parens -Wzero-as-null-pointer-constant") if (WEVERYTHING) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-shadow-field-in-constructor -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-padded -Wno-switch-enum -Wno-deprecated-dynamic-exception-spec -Wno-float-equal -Wno-weak-vtables -Wno-shift-sign-overflow -Wno-sign-conversion -Wno-conversion -Wno-exit-time-destructors -Wno-undefined-func-template -Wno-documentation-unknown-command -Wno-missing-variable-declarations -Wno-unused-template -Wno-global-constructors -Wno-c99-extensions -Wno-missing-prototypes -Wno-weak-template-vtables -Wno-zero-length-array -Wno-gnu-anonymous-struct -Wno-nested-anon-types -Wno-double-promotion -Wno-disabled-macro-expansion -Wno-vla-extension -Wno-vla -Wno-packed") # TODO Enable conversion, sign-conversion, double-promotion warnings. 
endif () @@ -71,7 +71,9 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-ctad-maybe-unsupported") endif () endif () -endif () +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wshadow") +endif() if (USE_DEBUG_HELPERS) set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h") diff --git a/dbms/programs/copier/ClusterCopier.cpp b/dbms/programs/copier/ClusterCopier.cpp index 43158dedd71..435d06da854 100644 --- a/dbms/programs/copier/ClusterCopier.cpp +++ b/dbms/programs/copier/ClusterCopier.cpp @@ -123,7 +123,7 @@ enum class TaskState struct TaskStateWithOwner { TaskStateWithOwner() = default; - TaskStateWithOwner(TaskState state, const String & owner) : state(state), owner(owner) {} + TaskStateWithOwner(TaskState state_, const String & owner_) : state(state_), owner(owner_) {} TaskState state{TaskState::Unknown}; String owner; @@ -2100,9 +2100,9 @@ void ClusterCopierApp::initialize(Poco::Util::Application & self) // process_id is '#_' time_t timestamp = Poco::Timestamp().epochTime(); - auto pid = Poco::Process::id(); + auto curr_pid = Poco::Process::id(); - process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(pid); + process_id = std::to_string(DateLUT::instance().toNumYYYYMMDDhhmmss(timestamp)) + "_" + std::to_string(curr_pid); host_id = escapeForFileName(getFQDNOrHostName()) + '#' + process_id; process_path = Poco::Path(base_dir + "/clickhouse-copier_" + process_id).absolute().toString(); Poco::File(process_path).createDirectories(); diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp index 3c20510d481..a96c10072dc 100644 --- a/dbms/programs/obfuscator/Obfuscator.cpp +++ b/dbms/programs/obfuscator/Obfuscator.cpp @@ -176,7 +176,7 @@ private: const UInt64 seed; public: - UnsignedIntegerModel(UInt64 seed) : seed(seed) {} + 
UnsignedIntegerModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -212,7 +212,7 @@ private: const UInt64 seed; public: - SignedIntegerModel(UInt64 seed) : seed(seed) {} + SignedIntegerModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -256,7 +256,7 @@ private: Float res_prev_value = 0; public: - FloatModel(UInt64 seed) : seed(seed) {} + FloatModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -348,7 +348,7 @@ private: const UInt64 seed; public: - FixedStringModel(UInt64 seed) : seed(seed) {} + FixedStringModel(UInt64 seed_) : seed(seed_) {} void train(const IColumn &) override {} void finalize() override {} @@ -385,7 +385,7 @@ private: const DateLUTImpl & date_lut; public: - DateTimeModel(UInt64 seed) : seed(seed), date_lut(DateLUT::instance()) {} + DateTimeModel(UInt64 seed_) : seed(seed_), date_lut(DateLUT::instance()) {} void train(const IColumn &) override {} void finalize() override {} @@ -533,8 +533,8 @@ private: } public: - MarkovModel(MarkovModelParameters params) - : params(std::move(params)), code_points(params.order, BEGIN) {} + MarkovModel(MarkovModelParameters params_) + : params(std::move(params_)), code_points(params.order, BEGIN) {} void consume(const char * data, size_t size) { @@ -745,7 +745,7 @@ private: MarkovModel markov_model; public: - StringModel(UInt64 seed, MarkovModelParameters params) : seed(seed), markov_model(std::move(params)) {} + StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(std::move(params_)) {} void train(const IColumn & column) override { @@ -797,7 +797,7 @@ private: ModelPtr nested_model; public: - ArrayModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {} + ArrayModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {} void train(const IColumn & column) override { @@ -830,7 +830,7 @@ 
private: ModelPtr nested_model; public: - NullableModel(ModelPtr nested_model) : nested_model(std::move(nested_model)) {} + NullableModel(ModelPtr nested_model_) : nested_model(std::move(nested_model_)) {} void train(const IColumn & column) override { diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp index dcedac47f42..70aaba3f137 100644 --- a/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp +++ b/dbms/programs/odbc-bridge/ODBCBlockInputStream.cpp @@ -18,12 +18,12 @@ namespace ErrorCodes ODBCBlockInputStream::ODBCBlockInputStream( - Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size) - : session{session} + Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_) + : session{session_} , statement{(this->session << query_str, Poco::Data::Keywords::now)} , result{statement} , iterator{result.begin()} - , max_block_size{max_block_size} + , max_block_size{max_block_size_} , log(&Logger::get("ODBCBlockInputStream")) { if (sample_block.columns() != result.columnCount()) @@ -43,46 +43,46 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt16: + case ValueType::vtUInt16: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt32: + case ValueType::vtUInt32: static_cast(column).insertValue(value.convert()); break; - case ValueType::UInt64: + case ValueType::vtUInt64: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int8: + case ValueType::vtInt8: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int16: + case ValueType::vtInt16: static_cast(column).insertValue(value.convert()); break; - case ValueType::Int32: + case ValueType::vtInt32: static_cast(column).insertValue(value.convert()); break; - 
case ValueType::Int64: + case ValueType::vtInt64: static_cast(column).insertValue(value.convert()); break; - case ValueType::Float32: + case ValueType::vtFloat32: static_cast(column).insertValue(value.convert()); break; - case ValueType::Float64: + case ValueType::vtFloat64: static_cast(column).insertValue(value.convert()); break; - case ValueType::String: + case ValueType::vtString: static_cast(column).insert(value.convert()); break; - case ValueType::Date: + case ValueType::vtDate: static_cast(column).insertValue(UInt16{LocalDate{value.convert()}.getDayNum()}); break; - case ValueType::DateTime: + case ValueType::vtDateTime: static_cast(column).insertValue(time_t{LocalDateTime{value.convert()}}); break; - case ValueType::UUID: + case ValueType::vtUUID: static_cast(column).insert(parse(value.convert())); break; } diff --git a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h b/dbms/programs/odbc-bridge/ODBCBlockInputStream.h index d22aad91232..13491e05822 100644 --- a/dbms/programs/odbc-bridge/ODBCBlockInputStream.h +++ b/dbms/programs/odbc-bridge/ODBCBlockInputStream.h @@ -16,7 +16,7 @@ class ODBCBlockInputStream final : public IBlockInputStream { public: ODBCBlockInputStream( - Poco::Data::Session && session, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size); + Poco::Data::Session && session_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_); String getName() const override { return "ODBC"; } diff --git a/dbms/programs/performance-test/PerformanceTestSuite.cpp b/dbms/programs/performance-test/PerformanceTestSuite.cpp index 6ddcc67f48a..14ea8882a6b 100644 --- a/dbms/programs/performance-test/PerformanceTestSuite.cpp +++ b/dbms/programs/performance-test/PerformanceTestSuite.cpp @@ -324,7 +324,6 @@ try using po::value; using Strings = DB::Strings; - po::options_description desc("Allowed options"); desc.add_options() ("help", "produce help message") diff --git 
a/dbms/programs/server/MetricsTransmitter.cpp b/dbms/programs/server/MetricsTransmitter.cpp index e8cf4a2c21b..8419d3e1b8c 100644 --- a/dbms/programs/server/MetricsTransmitter.cpp +++ b/dbms/programs/server/MetricsTransmitter.cpp @@ -16,8 +16,8 @@ namespace DB { MetricsTransmitter::MetricsTransmitter( - const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics) - : async_metrics(async_metrics), config_name(config_name) + const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_) + : async_metrics(async_metrics_), config_name(config_name_) { interval_seconds = config.getInt(config_name + ".interval", 60); send_events = config.getBool(config_name + ".events", true); diff --git a/dbms/programs/server/MetricsTransmitter.h b/dbms/programs/server/MetricsTransmitter.h index 69a11bf2bad..b9c7fd7f179 100644 --- a/dbms/programs/server/MetricsTransmitter.h +++ b/dbms/programs/server/MetricsTransmitter.h @@ -32,7 +32,7 @@ class AsynchronousMetrics; class MetricsTransmitter { public: - MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, const AsynchronousMetrics & async_metrics); + MetricsTransmitter(const Poco::Util::AbstractConfiguration & config, const std::string & config_name_, const AsynchronousMetrics & async_metrics_); ~MetricsTransmitter(); private: diff --git a/dbms/programs/server/MySQLHandler.cpp b/dbms/programs/server/MySQLHandler.cpp index adf35501ee4..ffc2c9ae200 100644 --- a/dbms/programs/server/MySQLHandler.cpp +++ b/dbms/programs/server/MySQLHandler.cpp @@ -37,14 +37,14 @@ namespace ErrorCodes extern const int OPENSSL_ERROR; } -MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id) +MySQLHandler::MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, 
RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_) : Poco::Net::TCPServerConnection(socket_) , server(server_) , log(&Poco::Logger::get("MySQLHandler")) , connection_context(server.context()) - , connection_id(connection_id) - , public_key(public_key) - , private_key(private_key) + , connection_id(connection_id_) + , public_key(public_key_) + , private_key(private_key_) { server_capability_flags = CLIENT_PROTOCOL_41 | CLIENT_SECURE_CONNECTION | CLIENT_PLUGIN_AUTH | CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA | CLIENT_CONNECT_WITH_DB | CLIENT_DEPRECATE_EOF; if (ssl_enabled) @@ -77,7 +77,7 @@ void MySQLHandler::run() if (!connection_context.mysql.max_packet_size) connection_context.mysql.max_packet_size = MAX_PACKET_LENGTH; - LOG_DEBUG(log, "Capabilities: " << handshake_response.capability_flags +/* LOG_TRACE(log, "Capabilities: " << handshake_response.capability_flags << "\nmax_packet_size: " << handshake_response.max_packet_size << "\ncharacter_set: " @@ -91,7 +91,7 @@ void MySQLHandler::run() << "\ndatabase: " << handshake_response.database << "\nauth_plugin_name: " - << handshake_response.auth_plugin_name); + << handshake_response.auth_plugin_name);*/ client_capability_flags = handshake_response.capability_flags; if (!(client_capability_flags & CLIENT_PROTOCOL_41)) diff --git a/dbms/programs/server/MySQLHandler.h b/dbms/programs/server/MySQLHandler.h index e899d8ef501..a5465916f8e 100644 --- a/dbms/programs/server/MySQLHandler.h +++ b/dbms/programs/server/MySQLHandler.h @@ -14,7 +14,7 @@ namespace DB class MySQLHandler : public Poco::Net::TCPServerConnection { public: - MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key, RSA & private_key, bool ssl_enabled, size_t connection_id); + MySQLHandler(IServer & server_, const Poco::Net::StreamSocket & socket_, RSA & public_key_, RSA & private_key_, bool ssl_enabled, size_t connection_id_); void run() final; diff --git a/dbms/programs/server/Server.cpp 
b/dbms/programs/server/Server.cpp index c2fbce603d4..464de1c7066 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -156,19 +156,19 @@ std::string Server::getDefaultCorePath() const return getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH)) + "cores"; } -void Server::defineOptions(Poco::Util::OptionSet & _options) +void Server::defineOptions(Poco::Util::OptionSet & options) { - _options.addOption( + options.addOption( Poco::Util::Option("help", "h", "show help and exit") .required(false) .repeatable(false) .binding("help")); - _options.addOption( + options.addOption( Poco::Util::Option("version", "V", "show version and exit") .required(false) .repeatable(false) .binding("version")); - BaseDaemon::defineOptions(_options); + BaseDaemon::defineOptions(options); } int Server::main(const std::vector & /*args*/) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h index a5a1b777fa3..ec151baa305 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionArgMinMax.h @@ -35,8 +35,8 @@ private: const DataTypePtr & type_val; public: - AggregateFunctionArgMinMax(const DataTypePtr & type_res, const DataTypePtr & type_val) - : IAggregateFunctionDataHelper>({type_res, type_val}, {}), + AggregateFunctionArgMinMax(const DataTypePtr & type_res_, const DataTypePtr & type_val_) + : IAggregateFunctionDataHelper>({type_res_, type_val_}, {}), type_res(this->argument_types[0]), type_val(this->argument_types[1]) { if (!type_val->isComparable()) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h index d732d65ecf8..f578cee9d00 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupArray.h @@ -253,8 +253,8 @@ class GroupArrayGeneralListImpl final UInt64 
max_elems; public: - GroupArrayGeneralListImpl(const DataTypePtr & data_type, UInt64 max_elems_ = std::numeric_limits::max()) - : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type}, {}) + GroupArrayGeneralListImpl(const DataTypePtr & data_type_, UInt64 max_elems_ = std::numeric_limits::max()) + : IAggregateFunctionDataHelper, GroupArrayGeneralListImpl>({data_type_}, {}) , data_type(this->argument_types[0]), max_elems(max_elems_) {} String getName() const override { return "groupArray"; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h index e770dfee7fa..422d1f7a98f 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -454,6 +455,44 @@ public: return count; } + /** + * Return new set with specified range (not include the range_end) + */ + UInt64 rb_range(UInt32 range_start, UInt32 range_end, RoaringBitmapWithSmallSet& r1) const + { + UInt64 count = 0; + if (range_start >= range_end) + return count; + if (isSmall()) + { + std::vector ans; + for (const auto & x : small) + { + T val = x.getValue(); + if ((UInt32)val >= range_start && (UInt32)val < range_end) + { + r1.add(val); + count++; + } + } + } + else + { + roaring_uint32_iterator_t iterator; + roaring_init_iterator(rb, &iterator); + roaring_move_uint32_iterator_equalorlarger(&iterator, range_start); + while (iterator.has_value) + { + if ((UInt32)iterator.current_value >= range_end) + break; + r1.add(iterator.current_value); + roaring_advance_uint32_iterator(&iterator); + count++; + } + } + return count; + } + private: /// To read and write the DB Buffer directly, migrate code from CRoaring void db_roaring_bitmap_add_many(DB::ReadBuffer & dbBuf, roaring_bitmap_t * r, size_t n_args) diff --git 
a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h index 7a913c48ffa..029bf6efe83 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionGroupUniqArray.h @@ -164,8 +164,8 @@ class AggregateFunctionGroupUniqArrayGeneric } public: - AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type, UInt64 max_elems_ = std::numeric_limits::max()) - : IAggregateFunctionDataHelper>({input_data_type}, {}) + AggregateFunctionGroupUniqArrayGeneric(const DataTypePtr & input_data_type_, UInt64 max_elems_ = std::numeric_limits::max()) + : IAggregateFunctionDataHelper>({input_data_type_}, {}) , input_data_type(this->argument_types[0]) , max_elems(max_elems_) {} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h index df6078d86fc..04aa88a806c 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionHistogram.h @@ -304,9 +304,9 @@ private: const UInt32 max_bins; public: - AggregateFunctionHistogram(UInt32 max_bins, const DataTypes & arguments, const Array & params) + AggregateFunctionHistogram(UInt32 max_bins_, const DataTypes & arguments, const Array & params) : IAggregateFunctionDataHelper>(arguments, params) - , max_bins(max_bins) + , max_bins(max_bins_) { } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp index 9d82e6930ee..2f4962f26a2 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.cpp @@ -104,21 +104,21 @@ void registerAggregateFunctionMLMethod(AggregateFunctionFactory & factory) } LinearModelData::LinearModelData( - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 param_num, - UInt64 batch_capacity, - 
std::shared_ptr gradient_computer, - std::shared_ptr weights_updater) - : learning_rate(learning_rate) - , l2_reg_coef(l2_reg_coef) - , batch_capacity(batch_capacity) + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 param_num_, + UInt64 batch_capacity_, + std::shared_ptr gradient_computer_, + std::shared_ptr weights_updater_) + : learning_rate(learning_rate_) + , l2_reg_coef(l2_reg_coef_) + , batch_capacity(batch_capacity_) , batch_size(0) - , gradient_computer(std::move(gradient_computer)) - , weights_updater(std::move(weights_updater)) + , gradient_computer(std::move(gradient_computer_)) + , weights_updater(std::move(weights_updater_)) { - weights.resize(param_num, Float64{0.0}); - gradient_batch.resize(param_num + 1, Float64{0.0}); + weights.resize(param_num_, Float64{0.0}); + gradient_batch.resize(param_num_ + 1, Float64{0.0}); } void LinearModelData::update_state() diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h index 95ac64c21d8..a5d558364a5 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMLMethod.h @@ -248,12 +248,12 @@ public: LinearModelData() {} LinearModelData( - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 param_num, - UInt64 batch_capacity, - std::shared_ptr gradient_computer, - std::shared_ptr weights_updater); + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 param_num_, + UInt64 batch_capacity_, + std::shared_ptr gradient_computer_, + std::shared_ptr weights_updater_); void add(const IColumn ** columns, size_t row_num); @@ -304,21 +304,21 @@ public: String getName() const override { return Name::name; } explicit AggregateFunctionMLMethod( - UInt32 param_num, - std::unique_ptr gradient_computer, - std::string weights_updater_name, - Float64 learning_rate, - Float64 l2_reg_coef, - UInt64 batch_size, + UInt32 param_num_, + std::unique_ptr gradient_computer_, + std::string 
weights_updater_name_, + Float64 learning_rate_, + Float64 l2_reg_coef_, + UInt64 batch_size_, const DataTypes & arguments_types, const Array & params) : IAggregateFunctionDataHelper>(arguments_types, params) - , param_num(param_num) - , learning_rate(learning_rate) - , l2_reg_coef(l2_reg_coef) - , batch_size(batch_size) - , gradient_computer(std::move(gradient_computer)) - , weights_updater_name(std::move(weights_updater_name)) + , param_num(param_num_) + , learning_rate(learning_rate_) + , l2_reg_coef(l2_reg_coef_) + , batch_size(batch_size_) + , gradient_computer(std::move(gradient_computer_)) + , weights_updater_name(std::move(weights_updater_name_)) { } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 6660e03b529..019968994b1 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -679,8 +679,8 @@ private: DataTypePtr & type; public: - AggregateFunctionsSingleValue(const DataTypePtr & type) - : IAggregateFunctionDataHelper>({type}, {}) + AggregateFunctionsSingleValue(const DataTypePtr & type_) + : IAggregateFunctionDataHelper>({type_}, {}) , type(this->argument_types[0]) { if (StringRef(Data::name()) == StringRef("min") diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h index 2e9ec914b99..1461b1bcae9 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.h @@ -76,8 +76,8 @@ private: DataTypePtr & argument_type; public: - AggregateFunctionQuantile(const DataTypePtr & argument_type, const Array & params) - : IAggregateFunctionDataHelper>({argument_type}, params) + AggregateFunctionQuantile(const DataTypePtr & argument_type_, const Array & params) + : IAggregateFunctionDataHelper>({argument_type_}, params) , levels(params, returns_many), 
level(levels.levels[0]), argument_type(this->argument_types[0]) { if (!returns_many && levels.size() > 1) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionResample.h b/dbms/src/AggregateFunctions/AggregateFunctionResample.h index e82f08366df..894e0e18f51 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionResample.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionResample.h @@ -33,18 +33,18 @@ private: public: AggregateFunctionResample( - AggregateFunctionPtr nested_function, - Key begin, - Key end, - size_t step, + AggregateFunctionPtr nested_function_, + Key begin_, + Key end_, + size_t step_, const DataTypes & arguments, const Array & params) : IAggregateFunctionHelper>{arguments, params} - , nested_function{nested_function} + , nested_function{nested_function_} , last_col{arguments.size() - 1} - , begin{begin} - , end{end} - , step{step} + , begin{begin_} + , end{end_} + , step{step_} , total{0} , aod{nested_function->alignOfData()} , sod{(nested_function->sizeOfData() + aod - 1) / aod * aod} diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h b/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h index 80860fdb62a..e4b6985316f 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionSequenceMatch.h @@ -142,9 +142,9 @@ template class AggregateFunctionSequenceBase : public IAggregateFunctionDataHelper { public: - AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern) + AggregateFunctionSequenceBase(const DataTypes & arguments, const Array & params, const String & pattern_) : IAggregateFunctionDataHelper(arguments, params) - , pattern(pattern) + , pattern(pattern_) { arg_count = arguments.size(); parsePattern(); @@ -199,7 +199,7 @@ private: std::uint64_t extra; PatternAction() = default; - PatternAction(const PatternActionType type, const std::uint64_t extra = 0) : type{type}, extra{extra} {} + 
PatternAction(const PatternActionType type_, const std::uint64_t extra_ = 0) : type{type_}, extra{extra_} {} }; using PatternActions = PODArrayWithStackMemory; @@ -520,8 +520,8 @@ private: struct DFAState { - DFAState(bool has_kleene = false) - : has_kleene{has_kleene}, event{0}, transition{DFATransition::None} + DFAState(bool has_kleene_ = false) + : has_kleene{has_kleene_}, event{0}, transition{DFATransition::None} {} /// .-------. @@ -554,8 +554,8 @@ template class AggregateFunctionSequenceMatch final : public AggregateFunctionSequenceBase> { public: - AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern) - : AggregateFunctionSequenceBase>(arguments, params, pattern) {} + AggregateFunctionSequenceMatch(const DataTypes & arguments, const Array & params, const String & pattern_) + : AggregateFunctionSequenceBase>(arguments, params, pattern_) {} using AggregateFunctionSequenceBase>::AggregateFunctionSequenceBase; @@ -582,8 +582,8 @@ template class AggregateFunctionSequenceCount final : public AggregateFunctionSequenceBase> { public: - AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern) - : AggregateFunctionSequenceBase>(arguments, params, pattern) {} + AggregateFunctionSequenceCount(const DataTypes & arguments, const Array & params, const String & pattern_) + : AggregateFunctionSequenceBase>(arguments, params, pattern_) {} using AggregateFunctionSequenceBase>::AggregateFunctionSequenceBase; diff --git a/dbms/src/AggregateFunctions/AggregateFunctionState.h b/dbms/src/AggregateFunctions/AggregateFunctionState.h index 2d8e5c6a537..1f49ac80db9 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionState.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionState.h @@ -23,9 +23,9 @@ private: Array params; public: - AggregateFunctionState(AggregateFunctionPtr nested, const DataTypes & arguments, const Array & params) - : IAggregateFunctionHelper(arguments, params) 
- , nested_func(nested), arguments(arguments), params(params) {} + AggregateFunctionState(AggregateFunctionPtr nested_, const DataTypes & arguments_, const Array & params_) + : IAggregateFunctionHelper(arguments_, params_) + , nested_func(nested_), arguments(arguments_), params(params_) {} String getName() const override { diff --git a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h index 6837379f98f..34bc92edfa0 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionSumMap.h @@ -62,10 +62,10 @@ private: public: AggregateFunctionSumMapBase( - const DataTypePtr & keys_type, const DataTypes & values_types, + const DataTypePtr & keys_type_, const DataTypes & values_types_, const DataTypes & argument_types_, const Array & params_) : IAggregateFunctionDataHelper>, Derived>(argument_types_, params_) - , keys_type(keys_type), values_types(values_types) {} + , keys_type(keys_type_), values_types(values_types_) {} String getName() const override { return "sumMap"; } @@ -295,9 +295,9 @@ private: public: AggregateFunctionSumMapFiltered( - const DataTypePtr & keys_type, const DataTypes & values_types, const Array & keys_to_keep_, + const DataTypePtr & keys_type_, const DataTypes & values_types_, const Array & keys_to_keep_, const DataTypes & argument_types_, const Array & params_) - : Base{keys_type, values_types, argument_types_, params_} + : Base{keys_type_, values_types_, argument_types_, params_} { keys_to_keep.reserve(keys_to_keep_.size()); for (const Field & f : keys_to_keep_) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h b/dbms/src/AggregateFunctions/AggregateFunctionTopK.h index 72b724843a1..9bcfa07b78e 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionTopK.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionTopK.h @@ -44,9 +44,9 @@ protected: UInt64 reserved; public: - AggregateFunctionTopK(UInt64 threshold, UInt64 
load_factor, const DataTypes & argument_types_, const Array & params) + AggregateFunctionTopK(UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params) : IAggregateFunctionDataHelper, AggregateFunctionTopK>(argument_types_, params) - , threshold(threshold), reserved(load_factor * threshold) {} + , threshold(threshold_), reserved(load_factor * threshold) {} String getName() const override { return is_weighted ? "topKWeighted" : "topK"; } @@ -139,9 +139,9 @@ private: public: AggregateFunctionTopKGeneric( - UInt64 threshold, UInt64 load_factor, const DataTypePtr & input_data_type, const Array & params) - : IAggregateFunctionDataHelper>({input_data_type}, params) - , threshold(threshold), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {} + UInt64 threshold_, UInt64 load_factor, const DataTypePtr & input_data_type_, const Array & params) + : IAggregateFunctionDataHelper>({input_data_type_}, params) + , threshold(threshold_), reserved(load_factor * threshold), input_data_type(this->argument_types[0]) {} String getName() const override { return is_weighted ? 
"topKWeighted" : "topK"; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h b/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h index de9ca69c17f..88e16f330ce 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionUniqUpTo.h @@ -136,9 +136,9 @@ private: UInt8 threshold; public: - AggregateFunctionUniqUpTo(UInt8 threshold, const DataTypes & argument_types_, const Array & params_) + AggregateFunctionUniqUpTo(UInt8 threshold_, const DataTypes & argument_types_, const Array & params_) : IAggregateFunctionDataHelper, AggregateFunctionUniqUpTo>(argument_types_, params_) - , threshold(threshold) + , threshold(threshold_) { } @@ -196,9 +196,9 @@ private: UInt8 threshold; public: - AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold) + AggregateFunctionUniqUpToVariadic(const DataTypes & arguments, const Array & params, UInt8 threshold_) : IAggregateFunctionDataHelper, AggregateFunctionUniqUpToVariadic>(arguments, params) - , threshold(threshold) + , threshold(threshold_) { if (argument_is_tuple) num_args = typeid_cast(*arguments[0]).getElements().size(); diff --git a/dbms/src/AggregateFunctions/QuantileTDigest.h b/dbms/src/AggregateFunctions/QuantileTDigest.h index f7201ef3b0d..91211d25173 100644 --- a/dbms/src/AggregateFunctions/QuantileTDigest.h +++ b/dbms/src/AggregateFunctions/QuantileTDigest.h @@ -50,9 +50,9 @@ class QuantileTDigest Centroid() = default; - explicit Centroid(Value mean, Count count) - : mean(mean) - , count(count) + explicit Centroid(Value mean_, Count count_) + : mean(mean_) + , count(count_) {} Centroid & operator+=(const Centroid & other) diff --git a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h b/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h index 4beeecd93bc..52d0181fce1 100644 --- a/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h +++ 
b/dbms/src/AggregateFunctions/ReservoirSamplerDeterministic.h @@ -53,8 +53,8 @@ class ReservoirSamplerDeterministic } public: - ReservoirSamplerDeterministic(const size_t sample_count = DEFAULT_SAMPLE_COUNT) - : sample_count{sample_count} + ReservoirSamplerDeterministic(const size_t sample_count_ = DEFAULT_SAMPLE_COUNT) + : sample_count{sample_count_} { } diff --git a/dbms/src/Columns/ColumnConst.cpp b/dbms/src/Columns/ColumnConst.cpp index 3703d24f1cb..91036499871 100644 --- a/dbms/src/Columns/ColumnConst.cpp +++ b/dbms/src/Columns/ColumnConst.cpp @@ -13,8 +13,8 @@ namespace ErrorCodes extern const int SIZES_OF_COLUMNS_DOESNT_MATCH; } -ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s) - : data(data_), s(s) +ColumnConst::ColumnConst(const ColumnPtr & data_, size_t s_) + : data(data_), s(s_) { /// Squash Const of Const. while (const ColumnConst * const_data = typeid_cast(data.get())) diff --git a/dbms/src/Columns/ColumnConst.h b/dbms/src/Columns/ColumnConst.h index 6731061a797..6b320f12f28 100644 --- a/dbms/src/Columns/ColumnConst.h +++ b/dbms/src/Columns/ColumnConst.h @@ -26,7 +26,7 @@ private: WrappedPtr data; size_t s; - ColumnConst(const ColumnPtr & data, size_t s); + ColumnConst(const ColumnPtr & data, size_t s_); ColumnConst(const ColumnConst & src) = default; public: diff --git a/dbms/src/Columns/ColumnDecimal.cpp b/dbms/src/Columns/ColumnDecimal.cpp index 55c22884144..5e475b64dfa 100644 --- a/dbms/src/Columns/ColumnDecimal.cpp +++ b/dbms/src/Columns/ColumnDecimal.cpp @@ -26,10 +26,12 @@ namespace ErrorCodes template int ColumnDecimal::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const { - auto other = static_cast(rhs_); + auto & other = static_cast(rhs_); const T & a = data[n]; const T & b = other.data[m]; + if (scale == other.scale) + return a > b ? 1 : (a < b ? -1 : 0); return decimalLess(b, a, other.scale, scale) ? 1 : (decimalLess(a, b, scale, other.scale) ? 
-1 : 0); } diff --git a/dbms/src/Columns/ColumnFunction.cpp b/dbms/src/Columns/ColumnFunction.cpp index 4fb34959ddc..75fe22446f4 100644 --- a/dbms/src/Columns/ColumnFunction.cpp +++ b/dbms/src/Columns/ColumnFunction.cpp @@ -13,8 +13,8 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture) - : size_(size), function(function) +ColumnFunction::ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture) + : size_(size), function(function_) { appendArguments(columns_to_capture); } diff --git a/dbms/src/Columns/ColumnFunction.h b/dbms/src/Columns/ColumnFunction.h index 571123ae892..77475b97da8 100644 --- a/dbms/src/Columns/ColumnFunction.h +++ b/dbms/src/Columns/ColumnFunction.h @@ -20,7 +20,7 @@ class ColumnFunction final : public COWHelper private: friend class COWHelper; - ColumnFunction(size_t size, FunctionBasePtr function, const ColumnsWithTypeAndName & columns_to_capture); + ColumnFunction(size_t size, FunctionBasePtr function_, const ColumnsWithTypeAndName & columns_to_capture); public: const char * getFamilyName() const override { return "Function"; } diff --git a/dbms/src/Columns/ColumnLowCardinality.cpp b/dbms/src/Columns/ColumnLowCardinality.cpp index 1dbb3f8574f..152570f1a6f 100644 --- a/dbms/src/Columns/ColumnLowCardinality.cpp +++ b/dbms/src/Columns/ColumnLowCardinality.cpp @@ -360,12 +360,12 @@ bool ColumnLowCardinality::containsNull() const ColumnLowCardinality::Index::Index() : positions(ColumnUInt8::create()), size_of_type(sizeof(UInt8)) {} -ColumnLowCardinality::Index::Index(MutableColumnPtr && positions) : positions(std::move(positions)) +ColumnLowCardinality::Index::Index(MutableColumnPtr && positions_) : positions(std::move(positions_)) { updateSizeOfType(); } -ColumnLowCardinality::Index::Index(ColumnPtr positions) : positions(std::move(positions)) 
+ColumnLowCardinality::Index::Index(ColumnPtr positions_) : positions(std::move(positions_)) { updateSizeOfType(); } diff --git a/dbms/src/Columns/ColumnLowCardinality.h b/dbms/src/Columns/ColumnLowCardinality.h index a6a129fbb09..9081938e2c6 100644 --- a/dbms/src/Columns/ColumnLowCardinality.h +++ b/dbms/src/Columns/ColumnLowCardinality.h @@ -201,8 +201,8 @@ public: public: Index(); Index(const Index & other) = default; - explicit Index(MutableColumnPtr && positions); - explicit Index(ColumnPtr positions); + explicit Index(MutableColumnPtr && positions_); + explicit Index(ColumnPtr positions_); const ColumnPtr & getPositions() const { return positions; } WrappedPtr & getPositionsPtr() { return positions; } diff --git a/dbms/src/Columns/ColumnTuple.cpp b/dbms/src/Columns/ColumnTuple.cpp index 3ad7f007edf..bef717361df 100644 --- a/dbms/src/Columns/ColumnTuple.cpp +++ b/dbms/src/Columns/ColumnTuple.cpp @@ -257,8 +257,8 @@ struct ColumnTuple::Less TupleColumns columns; int nan_direction_hint; - Less(const TupleColumns & columns, int nan_direction_hint_) - : columns(columns), nan_direction_hint(nan_direction_hint_) + Less(const TupleColumns & columns_, int nan_direction_hint_) + : columns(columns_), nan_direction_hint(nan_direction_hint_) { } diff --git a/dbms/src/Columns/ColumnUnique.h b/dbms/src/Columns/ColumnUnique.h index 0c5efd8058d..154bb457f01 100644 --- a/dbms/src/Columns/ColumnUnique.h +++ b/dbms/src/Columns/ColumnUnique.h @@ -186,10 +186,10 @@ ColumnUnique::ColumnUnique(const IDataType & type) } template -ColumnUnique::ColumnUnique(MutableColumnPtr && holder, bool is_nullable) +ColumnUnique::ColumnUnique(MutableColumnPtr && holder, bool is_nullable_) : column_holder(std::move(holder)) - , is_nullable(is_nullable) - , index(numSpecialValues(is_nullable), 0) + , is_nullable(is_nullable_) + , index(numSpecialValues(is_nullable_), 0) { if (column_holder->size() < numSpecialValues()) throw Exception("Too small holder column for ColumnUnique.", 
ErrorCodes::ILLEGAL_COLUMN); diff --git a/dbms/src/Columns/ReverseIndex.h b/dbms/src/Columns/ReverseIndex.h index 43d191bbc3e..8fa4e87680b 100644 --- a/dbms/src/Columns/ReverseIndex.h +++ b/dbms/src/Columns/ReverseIndex.h @@ -235,8 +235,8 @@ template class ReverseIndex { public: - explicit ReverseIndex(UInt64 num_prefix_rows_to_skip, UInt64 base_index) - : num_prefix_rows_to_skip(num_prefix_rows_to_skip), base_index(base_index), saved_hash_ptr(nullptr) {} + explicit ReverseIndex(UInt64 num_prefix_rows_to_skip_, UInt64 base_index_) + : num_prefix_rows_to_skip(num_prefix_rows_to_skip_), base_index(base_index_), saved_hash_ptr(nullptr) {} void setColumn(ColumnType * column_); diff --git a/dbms/src/Common/Allocator.h b/dbms/src/Common/Allocator.h index 8d2ab415aaf..dc1d6ff5df9 100644 --- a/dbms/src/Common/Allocator.h +++ b/dbms/src/Common/Allocator.h @@ -265,13 +265,12 @@ using Allocator = AllocatorWithHint +template class AllocatorWithStackMemory : private Base { private: - char stack_memory[N]; + alignas(Alignment) char stack_memory[N]; public: /// Do not use boost::noncopyable to avoid the warning about direct base @@ -291,7 +290,7 @@ public: return stack_memory; } - return Base::alloc(size); + return Base::alloc(size, Alignment); } void free(void * buf, size_t size) @@ -308,10 +307,10 @@ public: /// Already was big enough to not fit in stack_memory. if (old_size > N) - return Base::realloc(buf, old_size, new_size); + return Base::realloc(buf, old_size, new_size, Alignment); /// Was in stack memory, but now will not fit there. 
- void * new_buf = Base::alloc(new_size); + void * new_buf = Base::alloc(new_size, Alignment); memcpy(new_buf, buf, old_size); return new_buf; } diff --git a/dbms/src/Common/ColumnsHashing.h b/dbms/src/Common/ColumnsHashing.h index 661f6527d8e..bf564738f7a 100644 --- a/dbms/src/Common/ColumnsHashing.h +++ b/dbms/src/Common/ColumnsHashing.h @@ -243,11 +243,11 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod throw Exception("Cache wasn't created for HashMethodSingleLowCardinalityColumn", ErrorCodes::LOGICAL_ERROR); - LowCardinalityDictionaryCache * cache; + LowCardinalityDictionaryCache * lcd_cache; if constexpr (use_cache) { - cache = typeid_cast(context.get()); - if (!cache) + lcd_cache = typeid_cast(context.get()); + if (!lcd_cache) { const auto & cached_val = *context; throw Exception("Invalid type for HashMethodSingleLowCardinalityColumn cache: " @@ -267,7 +267,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod { dictionary_key = {column->getDictionary().getHash(), dict->size()}; if constexpr (use_cache) - cached_values = cache->get(dictionary_key); + cached_values = lcd_cache->get(dictionary_key); } if (cached_values) @@ -288,7 +288,7 @@ struct HashMethodSingleLowCardinalityColumn : public SingleColumnMethod cached_values->saved_hash = saved_hash; cached_values->dictionary_holder = dictionary_holder; - cache->set(dictionary_key, cached_values); + lcd_cache->set(dictionary_key, cached_values); } } } @@ -470,8 +470,8 @@ struct HashMethodKeysFixed Sizes key_sizes; size_t keys_size; - HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes, const HashMethodContextPtr &) - : Base(key_columns), key_sizes(std::move(key_sizes)), keys_size(key_columns.size()) + HashMethodKeysFixed(const ColumnRawPtrs & key_columns, const Sizes & key_sizes_, const HashMethodContextPtr &) + : Base(key_columns), key_sizes(std::move(key_sizes_)), keys_size(key_columns.size()) { if constexpr (has_low_cardinality) { @@ 
-525,8 +525,8 @@ struct HashMethodSerialized ColumnRawPtrs key_columns; size_t keys_size; - HashMethodSerialized(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &) - : key_columns(key_columns), keys_size(key_columns.size()) {} + HashMethodSerialized(const ColumnRawPtrs & key_columns_, const Sizes & /*key_sizes*/, const HashMethodContextPtr &) + : key_columns(key_columns_), keys_size(key_columns_.size()) {} protected: friend class columns_hashing_impl::HashMethodBase; @@ -550,8 +550,8 @@ struct HashMethodHashed ColumnRawPtrs key_columns; - HashMethodHashed(ColumnRawPtrs key_columns, const Sizes &, const HashMethodContextPtr &) - : key_columns(std::move(key_columns)) {} + HashMethodHashed(ColumnRawPtrs key_columns_, const Sizes &, const HashMethodContextPtr &) + : key_columns(std::move(key_columns_)) {} ALWAYS_INLINE Key getKey(size_t row, Arena &) const { return hash128(row, key_columns.size(), key_columns); } diff --git a/dbms/src/Common/ColumnsHashingImpl.h b/dbms/src/Common/ColumnsHashingImpl.h index 2a6cdb6cd69..d980a3f1b64 100644 --- a/dbms/src/Common/ColumnsHashingImpl.h +++ b/dbms/src/Common/ColumnsHashingImpl.h @@ -56,8 +56,8 @@ class EmplaceResultImpl bool inserted; public: - EmplaceResultImpl(Mapped & value, Mapped & cached_value, bool inserted) - : value(value), cached_value(cached_value), inserted(inserted) {} + EmplaceResultImpl(Mapped & value_, Mapped & cached_value_, bool inserted_) + : value(value_), cached_value(cached_value_), inserted(inserted_) {} bool isInserted() const { return inserted; } auto & getMapped() const { return value; } @@ -75,7 +75,7 @@ class EmplaceResultImpl bool inserted; public: - explicit EmplaceResultImpl(bool inserted) : inserted(inserted) {} + explicit EmplaceResultImpl(bool inserted_) : inserted(inserted_) {} bool isInserted() const { return inserted; } }; @@ -86,7 +86,7 @@ class FindResultImpl bool found; public: - FindResultImpl(Mapped * value, bool found) : value(value), 
found(found) {} + FindResultImpl(Mapped * value_, bool found_) : value(value_), found(found_) {} bool isFound() const { return found; } Mapped & getMapped() const { return *value; } }; @@ -97,7 +97,7 @@ class FindResultImpl bool found; public: - explicit FindResultImpl(bool found) : found(found) {} + explicit FindResultImpl(bool found_) : found(found_) {} bool isFound() const { return found; } }; diff --git a/dbms/src/Common/CounterInFile.h b/dbms/src/Common/CounterInFile.h index cbf7105a728..537d399e84f 100644 --- a/dbms/src/Common/CounterInFile.h +++ b/dbms/src/Common/CounterInFile.h @@ -67,13 +67,13 @@ public: int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666); if (-1 == fd) - DB::throwFromErrno("Cannot open file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); try { int flock_ret = flock(fd, LOCK_EX); if (-1 == flock_ret) - DB::throwFromErrno("Cannot lock file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::throwFromErrnoWithPath("Cannot lock file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); if (!file_doesnt_exists) { @@ -141,7 +141,7 @@ public: int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0666); if (-1 == fd) - DB::throwFromErrno("Cannot open file " + path, DB::ErrorCodes::CANNOT_OPEN_FILE); + DB::throwFromErrnoWithPath("Cannot open file " + path, path, DB::ErrorCodes::CANNOT_OPEN_FILE); try { diff --git a/dbms/src/Common/CurrentMetrics.h b/dbms/src/Common/CurrentMetrics.h index 43c85caba3c..b87504ef49a 100644 --- a/dbms/src/Common/CurrentMetrics.h +++ b/dbms/src/Common/CurrentMetrics.h @@ -59,15 +59,15 @@ namespace CurrentMetrics std::atomic * what; Value amount; - Increment(std::atomic * what, Value amount) - : what(what), amount(amount) + Increment(std::atomic * what_, Value amount_) + : what(what_), amount(amount_) { *what += amount; } public: - Increment(Metric metric, Value amount = 1) - : Increment(&values[metric], amount) {} + Increment(Metric 
metric, Value amount_ = 1) + : Increment(&values[metric], amount_) {} ~Increment() { diff --git a/dbms/src/Common/Elf.cpp b/dbms/src/Common/Elf.cpp index bb51b837a13..035477d0243 100644 --- a/dbms/src/Common/Elf.cpp +++ b/dbms/src/Common/Elf.cpp @@ -55,8 +55,8 @@ Elf::Elf(const std::string & path) } -Elf::Section::Section(const ElfShdr & header, const Elf & elf) - : header(header), elf(elf) +Elf::Section::Section(const ElfShdr & header_, const Elf & elf_) + : header(header_), elf(elf_) { } diff --git a/dbms/src/Common/Elf.h b/dbms/src/Common/Elf.h index 7f7fcc538b5..869b869b530 100644 --- a/dbms/src/Common/Elf.h +++ b/dbms/src/Common/Elf.h @@ -35,7 +35,7 @@ public: const char * end() const; size_t size() const; - Section(const ElfShdr & header, const Elf & elf); + Section(const ElfShdr & header_, const Elf & elf_); private: const Elf & elf; diff --git a/dbms/src/Common/Exception.cpp b/dbms/src/Common/Exception.cpp index 958f254fe92..0b64c65b791 100644 --- a/dbms/src/Common/Exception.cpp +++ b/dbms/src/Common/Exception.cpp @@ -9,6 +9,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -52,6 +55,11 @@ void throwFromErrno(const std::string & s, int code, int e) throw ErrnoException(s + ", " + errnoToString(code, e), code, e); } +void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, int the_errno) +{ + throw ErrnoException(s + ", " + errnoToString(code, the_errno), code, the_errno, path); +} + void tryLogCurrentException(const char * log_name, const std::string & start_of_message) { tryLogCurrentException(&Logger::get(log_name), start_of_message); @@ -68,7 +76,52 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ } } -std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace) +void getNoSpaceLeftInfoMessage(std::filesystem::path path, std::string & msg) +{ + path = std::filesystem::absolute(path); + /// It's possible to get ENOSPC for non 
existent file (e.g. if there are no free inodes and creat() fails) + /// So try to get info for existent parent directory. + while (!std::filesystem::exists(path) && path.has_relative_path()) + path = path.parent_path(); + + auto fs = DiskSpaceMonitor::getStatVFS(path); + msg += "\nTotal space: " + formatReadableSizeWithBinarySuffix(fs.f_blocks * fs.f_bsize) + + "\nAvailable space: " + formatReadableSizeWithBinarySuffix(fs.f_bavail * fs.f_bsize) + + "\nTotal inodes: " + formatReadableQuantity(fs.f_files) + + "\nAvailable inodes: " + formatReadableQuantity(fs.f_favail); + + auto mount_point = DiskSpaceMonitor::getMountPoint(path).string(); + msg += "\nMount point: " + mount_point; +#if defined(__linux__) + msg += "\nFilesystem: " + DiskSpaceMonitor::getFilesystemName(mount_point); +#endif +} + +std::string getExtraExceptionInfo(const std::exception & e) +{ + String msg; + try + { + if (auto file_exception = dynamic_cast(&e)) + { + if (file_exception->code() == ENOSPC) + getNoSpaceLeftInfoMessage(file_exception->message(), msg); + } + else if (auto errno_exception = dynamic_cast(&e)) + { + if (errno_exception->getErrno() == ENOSPC && errno_exception->getPath()) + getNoSpaceLeftInfoMessage(errno_exception->getPath().value(), msg); + } + } + catch (...) + { + msg += "\nCannot print extra info: " + getCurrentExceptionMessage(false, false, false); + } + + return msg; +} + +std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace /*= false*/, bool with_extra_info /*= true*/) { std::stringstream stream; @@ -78,7 +131,9 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded } catch (const Exception & e) { - stream << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace) << " (version " << VERSION_STRING << VERSION_OFFICIAL << ")"; + stream << getExceptionMessage(e, with_stacktrace, check_embedded_stacktrace) + << (with_extra_info ? 
getExtraExceptionInfo(e) : "") + << " (version " << VERSION_STRING << VERSION_OFFICIAL << ")"; } catch (const Poco::Exception & e) { @@ -86,7 +141,8 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded { stream << "Poco::Exception. Code: " << ErrorCodes::POCO_EXCEPTION << ", e.code() = " << e.code() << ", e.displayText() = " << e.displayText() - << " (version " << VERSION_STRING << VERSION_OFFICIAL << ")"; + << (with_extra_info ? getExtraExceptionInfo(e) : "") + << " (version " << VERSION_STRING << VERSION_OFFICIAL; } catch (...) {} } @@ -100,7 +156,9 @@ std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded if (status) name += " (demangling status: " + toString(status) + ")"; - stream << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what() << ", version = " << VERSION_STRING << VERSION_OFFICIAL; + stream << "std::exception. Code: " << ErrorCodes::STD_EXCEPTION << ", type: " << name << ", e.what() = " << e.what() + << (with_extra_info ? getExtraExceptionInfo(e) : "") + << ", version = " << VERSION_STRING << VERSION_OFFICIAL; } catch (...) 
{} } diff --git a/dbms/src/Common/Exception.h b/dbms/src/Common/Exception.h index 6b0656f4828..bd4d6e0be09 100644 --- a/dbms/src/Common/Exception.h +++ b/dbms/src/Common/Exception.h @@ -52,16 +52,18 @@ private: class ErrnoException : public Exception { public: - ErrnoException(const std::string & msg, int code, int saved_errno_) - : Exception(msg, code), saved_errno(saved_errno_) {} + ErrnoException(const std::string & msg, int code, int saved_errno_, const std::optional & path_ = {}) + : Exception(msg, code), saved_errno(saved_errno_), path(path_) {} ErrnoException * clone() const override { return new ErrnoException(*this); } void rethrow() const override { throw *this; } int getErrno() const { return saved_errno; } + const std::optional getPath() const { return path; } private: int saved_errno; + std::optional path; const char * name() const throw() override { return "DB::ErrnoException"; } const char * className() const throw() override { return "DB::ErrnoException"; } @@ -73,6 +75,8 @@ using Exceptions = std::vector; std::string errnoToString(int code, int the_errno = errno); [[noreturn]] void throwFromErrno(const std::string & s, int code, int the_errno = errno); +[[noreturn]] void throwFromErrnoWithPath(const std::string & s, const std::string & path, int code, + int the_errno = errno); /** Try to write an exception to the log (and forget about it). @@ -87,7 +91,8 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ * check_embedded_stacktrace - if DB::Exception has embedded stacktrace then * only this stack trace will be printed. 
*/ -std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace = false); +std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace = false, + bool with_extra_info = true); /// Returns error code from ErrorCodes int getCurrentExceptionCode(); diff --git a/dbms/src/Common/FieldVisitors.cpp b/dbms/src/Common/FieldVisitors.cpp index f77977b3eed..9a437d5ffe6 100644 --- a/dbms/src/Common/FieldVisitors.cpp +++ b/dbms/src/Common/FieldVisitors.cpp @@ -167,7 +167,7 @@ String FieldVisitorToString::operator() (const Tuple & x_def) const } -FieldVisitorHash::FieldVisitorHash(SipHash & hash) : hash(hash) {} +FieldVisitorHash::FieldVisitorHash(SipHash & hash_) : hash(hash_) {} void FieldVisitorHash::operator() (const Null &) const { diff --git a/dbms/src/Common/FieldVisitors.h b/dbms/src/Common/FieldVisitors.h index 5575c607b3b..56d3c84decc 100644 --- a/dbms/src/Common/FieldVisitors.h +++ b/dbms/src/Common/FieldVisitors.h @@ -222,7 +222,7 @@ class FieldVisitorHash : public StaticVisitor<> private: SipHash & hash; public: - FieldVisitorHash(SipHash & hash); + FieldVisitorHash(SipHash & hash_); void operator() (const Null & x) const; void operator() (const UInt64 & x) const; diff --git a/dbms/src/Common/HashTable/Hash.h b/dbms/src/Common/HashTable/Hash.h index 6e786e1ddbf..0f740163179 100644 --- a/dbms/src/Common/HashTable/Hash.h +++ b/dbms/src/Common/HashTable/Hash.h @@ -3,6 +3,8 @@ #include #include +#include + /** Hash functions that are better than the trivial function std::hash. 
* @@ -57,8 +59,6 @@ inline DB::UInt64 intHashCRC32(DB::UInt64 x) } -template struct DefaultHash; - template inline size_t DefaultHash64(T key) { @@ -72,28 +72,18 @@ inline size_t DefaultHash64(T key) return intHash64(u.out); } -#define DEFINE_HASH(T) \ -template <> struct DefaultHash\ -{\ - size_t operator() (T key) const\ - {\ - return DefaultHash64(key);\ - }\ +template +struct DefaultHash; + +template +struct DefaultHash>> +{ + size_t operator() (T key) const + { + return DefaultHash64(key); + } }; -DEFINE_HASH(DB::UInt8) -DEFINE_HASH(DB::UInt16) -DEFINE_HASH(DB::UInt32) -DEFINE_HASH(DB::UInt64) -DEFINE_HASH(DB::Int8) -DEFINE_HASH(DB::Int16) -DEFINE_HASH(DB::Int32) -DEFINE_HASH(DB::Int64) -DEFINE_HASH(DB::Float32) -DEFINE_HASH(DB::Float64) - -#undef DEFINE_HASH - template struct HashCRC32; diff --git a/dbms/src/Common/MemoryTracker.h b/dbms/src/Common/MemoryTracker.h index 4ce0ac262fa..620e1c44c83 100644 --- a/dbms/src/Common/MemoryTracker.h +++ b/dbms/src/Common/MemoryTracker.h @@ -31,9 +31,9 @@ class MemoryTracker const char * description = nullptr; public: - MemoryTracker(VariableContext level = VariableContext::Thread) : level(level) {} - MemoryTracker(Int64 limit_, VariableContext level = VariableContext::Thread) : limit(limit_), level(level) {} - MemoryTracker(MemoryTracker * parent_, VariableContext level = VariableContext::Thread) : parent(parent_), level(level) {} + MemoryTracker(VariableContext level_ = VariableContext::Thread) : level(level_) {} + MemoryTracker(Int64 limit_, VariableContext level_ = VariableContext::Thread) : limit(limit_), level(level_) {} + MemoryTracker(MemoryTracker * parent_, VariableContext level_ = VariableContext::Thread) : parent(parent_), level(level_) {} ~MemoryTracker(); diff --git a/dbms/src/Common/PODArray.h b/dbms/src/Common/PODArray.h index 01085a2c5a7..523927ce40a 100644 --- a/dbms/src/Common/PODArray.h +++ b/dbms/src/Common/PODArray.h @@ -636,6 +636,6 @@ using PaddedPODArray = PODArray; template using 
PODArrayWithStackMemory = PODArray, rounded_bytes>>; + AllocatorWithStackMemory, rounded_bytes, alignof(T)>>; } diff --git a/dbms/src/Common/ProfileEvents.cpp b/dbms/src/Common/ProfileEvents.cpp index e9b11c823ed..22cc9e57a2f 100644 --- a/dbms/src/Common/ProfileEvents.cpp +++ b/dbms/src/Common/ProfileEvents.cpp @@ -191,10 +191,10 @@ Counters global_counters(global_counters_array); const Event Counters::num_counters = END; -Counters::Counters(VariableContext level, Counters * parent) +Counters::Counters(VariableContext level_, Counters * parent_) : counters_holder(new Counter[num_counters] {}), - parent(parent), - level(level) + parent(parent_), + level(level_) { counters = counters_holder.get(); } diff --git a/dbms/src/Common/ProfileEvents.h b/dbms/src/Common/ProfileEvents.h index 78b4ebbf42e..ca327c9810b 100644 --- a/dbms/src/Common/ProfileEvents.h +++ b/dbms/src/Common/ProfileEvents.h @@ -33,7 +33,7 @@ namespace ProfileEvents VariableContext level = VariableContext::Thread; /// By default, any instance have to increment global counters - Counters(VariableContext level = VariableContext::Thread, Counters * parent = &global_counters); + Counters(VariableContext level_ = VariableContext::Thread, Counters * parent_ = &global_counters); /// Global level static initializer Counters(Counter * allocated_counters) diff --git a/dbms/src/Common/QueryProfiler.cpp b/dbms/src/Common/QueryProfiler.cpp index 64ac6311065..51d139d8fe0 100644 --- a/dbms/src/Common/QueryProfiler.cpp +++ b/dbms/src/Common/QueryProfiler.cpp @@ -127,9 +127,9 @@ namespace ErrorCodes } template -QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal) +QueryProfilerBase::QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_) : log(&Logger::get("QueryProfiler")) - , pause_signal(pause_signal) + , pause_signal(pause_signal_) { #if USE_INTERNAL_UNWIND_LIBRARY /// Sanity check. 
diff --git a/dbms/src/Common/QueryProfiler.h b/dbms/src/Common/QueryProfiler.h index 5eaf5e2e7f7..48b5ffc8b2c 100644 --- a/dbms/src/Common/QueryProfiler.h +++ b/dbms/src/Common/QueryProfiler.h @@ -35,7 +35,7 @@ template class QueryProfilerBase { public: - QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal); + QueryProfilerBase(const Int32 thread_id, const int clock_type, UInt32 period, const int pause_signal_); ~QueryProfilerBase(); private: diff --git a/dbms/src/Common/RWLock.cpp b/dbms/src/Common/RWLock.cpp index cbdf8b842c6..e343ce0b0cd 100644 --- a/dbms/src/Common/RWLock.cpp +++ b/dbms/src/Common/RWLock.cpp @@ -161,9 +161,9 @@ RWLockImpl::LockHolderImpl::~LockHolderImpl() } -RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent, RWLockImpl::GroupsContainer::iterator it_group, - RWLockImpl::ClientsContainer::iterator it_client) - : parent{std::move(parent)}, it_group{it_group}, it_client{it_client}, +RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent_, RWLockImpl::GroupsContainer::iterator it_group_, + RWLockImpl::ClientsContainer::iterator it_client_) + : parent{std::move(parent_)}, it_group{it_group_}, it_client{it_client_}, active_client_increment{(*it_client == RWLockImpl::Read) ? 
CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters} {} diff --git a/dbms/src/Common/RWLock.h b/dbms/src/Common/RWLock.h index 02a1e7cdb94..0467901fa27 100644 --- a/dbms/src/Common/RWLock.h +++ b/dbms/src/Common/RWLock.h @@ -68,7 +68,7 @@ private: std::condition_variable cv; /// all clients of the group wait group condvar - explicit Group(Type type) : type{type} {} + explicit Group(Type type_) : type{type_} {} }; mutable std::mutex mutex; diff --git a/dbms/src/Common/ShellCommand.cpp b/dbms/src/Common/ShellCommand.cpp index 66dbab35a20..8807d795a0d 100644 --- a/dbms/src/Common/ShellCommand.cpp +++ b/dbms/src/Common/ShellCommand.cpp @@ -34,13 +34,13 @@ namespace ErrorCodes extern const int CANNOT_CREATE_CHILD_PROCESS; } -ShellCommand::ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_) - : pid(pid) +ShellCommand::ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_) + : pid(pid_) , terminate_in_destructor(terminate_in_destructor_) , log(&Poco::Logger::get("ShellCommand")) - , in(in_fd) - , out(out_fd) - , err(err_fd) {} + , in(in_fd_) + , out(out_fd_) + , err(err_fd_) {} ShellCommand::~ShellCommand() { diff --git a/dbms/src/Common/ShellCommand.h b/dbms/src/Common/ShellCommand.h index 3d1308272e9..0298ec73a2b 100644 --- a/dbms/src/Common/ShellCommand.h +++ b/dbms/src/Common/ShellCommand.h @@ -32,7 +32,7 @@ private: Poco::Logger * log; - ShellCommand(pid_t pid, int in_fd, int out_fd, int err_fd, bool terminate_in_destructor_); + ShellCommand(pid_t pid_, int in_fd_, int out_fd_, int err_fd_, bool terminate_in_destructor_); static std::unique_ptr executeImpl(const char * filename, char * const argv[], bool pipe_stdin_only, bool terminate_in_destructor); diff --git a/dbms/src/Common/StatusFile.cpp b/dbms/src/Common/StatusFile.cpp index afe42262b55..e0f7788815c 100644 --- a/dbms/src/Common/StatusFile.cpp +++ b/dbms/src/Common/StatusFile.cpp @@ -51,7 +51,7 @@ 
StatusFile::StatusFile(const std::string & path_) fd = ::open(path.c_str(), O_WRONLY | O_CREAT, 0666); if (-1 == fd) - throwFromErrno("Cannot open file " + path, ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrnoWithPath("Cannot open file " + path, path, ErrorCodes::CANNOT_OPEN_FILE); try { @@ -61,14 +61,14 @@ StatusFile::StatusFile(const std::string & path_) if (errno == EWOULDBLOCK) throw Exception("Cannot lock file " + path + ". Another server instance in same directory is already running.", ErrorCodes::CANNOT_OPEN_FILE); else - throwFromErrno("Cannot lock file " + path, ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrnoWithPath("Cannot lock file " + path, path, ErrorCodes::CANNOT_OPEN_FILE); } if (0 != ftruncate(fd, 0)) - throwFromErrno("Cannot ftruncate " + path, ErrorCodes::CANNOT_TRUNCATE_FILE); + throwFromErrnoWithPath("Cannot ftruncate " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE); if (0 != lseek(fd, 0, SEEK_SET)) - throwFromErrno("Cannot lseek " + path, ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throwFromErrnoWithPath("Cannot lseek " + path, path, ErrorCodes::CANNOT_SEEK_THROUGH_FILE); /// Write information about current server instance to the file. 
{ diff --git a/dbms/src/Common/Stopwatch.h b/dbms/src/Common/Stopwatch.h index 7bfaccc72b2..6db345c219a 100644 --- a/dbms/src/Common/Stopwatch.h +++ b/dbms/src/Common/Stopwatch.h @@ -86,7 +86,7 @@ public: operator bool() const { return parent != nullptr; } - Lock(AtomicStopwatch * parent) : parent(parent) {} + Lock(AtomicStopwatch * parent_) : parent(parent_) {} Lock(Lock &&) = default; diff --git a/dbms/src/Common/StringSearcher.h b/dbms/src/Common/StringSearcher.h index f722ebc6c55..5e78ff23df1 100644 --- a/dbms/src/Common/StringSearcher.h +++ b/dbms/src/Common/StringSearcher.h @@ -75,8 +75,8 @@ private: #endif public: - StringSearcher(const char * const needle_, const size_t needle_size) - : needle{reinterpret_cast(needle_)}, needle_size{needle_size} + StringSearcher(const char * const needle_, const size_t needle_size_) + : needle{reinterpret_cast(needle_)}, needle_size{needle_size_} { if (0 == needle_size) return; @@ -714,8 +714,8 @@ struct LibCASCIICaseSensitiveStringSearcher { const char * const needle; - LibCASCIICaseSensitiveStringSearcher(const char * const needle, const size_t /* needle_size */) - : needle(needle) {} + LibCASCIICaseSensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */) + : needle(needle_) {} const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const { @@ -735,8 +735,8 @@ struct LibCASCIICaseInsensitiveStringSearcher { const char * const needle; - LibCASCIICaseInsensitiveStringSearcher(const char * const needle, const size_t /* needle_size */) - : needle(needle) {} + LibCASCIICaseInsensitiveStringSearcher(const char * const needle_, const size_t /* needle_size */) + : needle(needle_) {} const UInt8 * search(const UInt8 * haystack, const UInt8 * const haystack_end) const { diff --git a/dbms/src/Common/ThreadPool.cpp b/dbms/src/Common/ThreadPool.cpp index ce004ed7674..e790ac07839 100644 --- a/dbms/src/Common/ThreadPool.cpp +++ b/dbms/src/Common/ThreadPool.cpp @@ -22,14 +22,14 @@ 
namespace CurrentMetrics template -ThreadPoolImpl::ThreadPoolImpl(size_t max_threads) - : ThreadPoolImpl(max_threads, max_threads, max_threads) +ThreadPoolImpl::ThreadPoolImpl(size_t max_threads_) + : ThreadPoolImpl(max_threads_, max_threads_, max_threads_) { } template -ThreadPoolImpl::ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size) - : max_threads(max_threads), max_free_threads(max_free_threads), queue_size(queue_size) +ThreadPoolImpl::ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_) + : max_threads(max_threads_), max_free_threads(max_free_threads_), queue_size(queue_size_) { } diff --git a/dbms/src/Common/ThreadPool.h b/dbms/src/Common/ThreadPool.h index 23c0848e931..4354b9194b0 100644 --- a/dbms/src/Common/ThreadPool.h +++ b/dbms/src/Common/ThreadPool.h @@ -31,10 +31,10 @@ public: using Job = std::function; /// Size is constant. Up to num_threads are created on demand and then run until shutdown. - explicit ThreadPoolImpl(size_t max_threads); + explicit ThreadPoolImpl(size_t max_threads_); /// queue_size - maximum number of running plus scheduled jobs. It can be greater than max_threads. Zero means unlimited. - ThreadPoolImpl(size_t max_threads, size_t max_free_threads, size_t queue_size); + ThreadPoolImpl(size_t max_threads_, size_t max_free_threads_, size_t queue_size_); /// Add new job. Locks until number of scheduled jobs is less than maximum or exception in one of threads was thrown. /// If an exception in some thread was thrown, method silently returns, and exception will be rethrown only on call to 'wait' function. 
@@ -81,8 +81,8 @@ private: Job job; int priority; - JobWithPriority(Job job, int priority) - : job(job), priority(priority) {} + JobWithPriority(Job job_, int priority_) + : job(job_), priority(priority_) {} bool operator< (const JobWithPriority & rhs) const { diff --git a/dbms/src/Common/Throttler.h b/dbms/src/Common/Throttler.h index 3ad50215b9e..a23b0f9db22 100644 --- a/dbms/src/Common/Throttler.h +++ b/dbms/src/Common/Throttler.h @@ -36,12 +36,12 @@ namespace ErrorCodes class Throttler { public: - Throttler(size_t max_speed_, const std::shared_ptr & parent = nullptr) - : max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent) {} + Throttler(size_t max_speed_, const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), limit_exceeded_exception_message(""), parent(parent_) {} Throttler(size_t max_speed_, size_t limit_, const char * limit_exceeded_exception_message_, - const std::shared_ptr & parent = nullptr) - : max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent) {} + const std::shared_ptr & parent_ = nullptr) + : max_speed(max_speed_), limit(limit_), limit_exceeded_exception_message(limit_exceeded_exception_message_), parent(parent_) {} void add(const size_t amount) { diff --git a/dbms/src/Common/TraceCollector.cpp b/dbms/src/Common/TraceCollector.cpp index ff5de756886..6ed2074e47d 100644 --- a/dbms/src/Common/TraceCollector.cpp +++ b/dbms/src/Common/TraceCollector.cpp @@ -28,9 +28,9 @@ namespace ErrorCodes extern const int CANNOT_FCNTL; } -TraceCollector::TraceCollector(std::shared_ptr & trace_log) +TraceCollector::TraceCollector(std::shared_ptr & trace_log_) : log(&Poco::Logger::get("TraceCollector")) - , trace_log(trace_log) + , trace_log(trace_log_) { if (trace_log == nullptr) throw Exception("Invalid trace log pointer passed", ErrorCodes::NULL_POINTER_DEREFERENCE); diff --git a/dbms/src/Common/TraceCollector.h b/dbms/src/Common/TraceCollector.h index 
7c07f48776f..5d1b3775356 100644 --- a/dbms/src/Common/TraceCollector.h +++ b/dbms/src/Common/TraceCollector.h @@ -24,7 +24,7 @@ private: static void notifyToStop(); public: - TraceCollector(std::shared_ptr & trace_log); + TraceCollector(std::shared_ptr & trace_log_); ~TraceCollector(); }; diff --git a/dbms/src/Common/UInt128.h b/dbms/src/Common/UInt128.h index 92758ec80ff..b895c514c3e 100644 --- a/dbms/src/Common/UInt128.h +++ b/dbms/src/Common/UInt128.h @@ -28,7 +28,7 @@ struct UInt128 UInt64 high; UInt128() = default; - explicit UInt128(const UInt64 low, const UInt64 high) : low(low), high(high) {} + explicit UInt128(const UInt64 low_, const UInt64 high_) : low(low_), high(high_) {} explicit UInt128(const UInt64 rhs) : low(rhs), high() {} auto tuple() const { return std::tie(high, low); } diff --git a/dbms/src/Common/Volnitsky.h b/dbms/src/Common/Volnitsky.h index 907af50aefa..748cbe09138 100644 --- a/dbms/src/Common/Volnitsky.h +++ b/dbms/src/Common/Volnitsky.h @@ -331,11 +331,11 @@ public: * If you specify it small enough, the fallback algorithm will be used, * since it is considered that it's useless to waste time initializing the hash table. 
*/ - VolnitskyBase(const char * const needle, const size_t needle_size, size_t haystack_size_hint = 0) - : needle{reinterpret_cast(needle)} - , needle_size{needle_size} + VolnitskyBase(const char * const needle_, const size_t needle_size_, size_t haystack_size_hint = 0) + : needle{reinterpret_cast(needle_)} + , needle_size{needle_size_} , fallback{VolnitskyTraits::isFallbackNeedle(needle_size, haystack_size_hint)} - , fallback_searcher{needle, needle_size} + , fallback_searcher{needle_, needle_size} { if (fallback) return; diff --git a/dbms/src/Common/ZooKeeper/IKeeper.cpp b/dbms/src/Common/ZooKeeper/IKeeper.cpp index 114b14d0118..34cfd02b78d 100644 --- a/dbms/src/Common/ZooKeeper/IKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/IKeeper.cpp @@ -23,8 +23,8 @@ namespace ProfileEvents namespace Coordination { -Exception::Exception(const std::string & msg, const int32_t code, int) - : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code) +Exception::Exception(const std::string & msg, const int32_t code_, int) + : DB::Exception(msg, DB::ErrorCodes::KEEPER_EXCEPTION), code(code_) { if (Coordination::isUserError(code)) ProfileEvents::increment(ProfileEvents::ZooKeeperUserExceptions); @@ -34,18 +34,18 @@ Exception::Exception(const std::string & msg, const int32_t code, int) ProfileEvents::increment(ProfileEvents::ZooKeeperOtherExceptions); } -Exception::Exception(const std::string & msg, const int32_t code) - : Exception(msg + " (" + errorMessage(code) + ")", code, 0) +Exception::Exception(const std::string & msg, const int32_t code_) + : Exception(msg + " (" + errorMessage(code_) + ")", code_, 0) { } -Exception::Exception(const int32_t code) - : Exception(errorMessage(code), code, 0) +Exception::Exception(const int32_t code_) + : Exception(errorMessage(code_), code_, 0) { } -Exception::Exception(const int32_t code, const std::string & path) - : Exception(std::string{errorMessage(code)} + ", path: " + path, code, 0) +Exception::Exception(const int32_t code_, const 
std::string & path) + : Exception(std::string{errorMessage(code_)} + ", path: " + path, code_, 0) { } diff --git a/dbms/src/Common/ZooKeeper/IKeeper.h b/dbms/src/Common/ZooKeeper/IKeeper.h index b4ecb9a7ceb..f415e0306e8 100644 --- a/dbms/src/Common/ZooKeeper/IKeeper.h +++ b/dbms/src/Common/ZooKeeper/IKeeper.h @@ -301,12 +301,12 @@ class Exception : public DB::Exception { private: /// Delegate constructor, used to minimize repetition; last parameter used for overload resolution. - Exception(const std::string & msg, const int32_t code, int); + Exception(const std::string & msg, const int32_t code_, int); public: - explicit Exception(const int32_t code); - Exception(const std::string & msg, const int32_t code); - Exception(const int32_t code, const std::string & path); + explicit Exception(const int32_t code_); + Exception(const std::string & msg, const int32_t code_); + Exception(const int32_t code_, const std::string & path); Exception(const Exception & exc); const char * name() const throw() override { return "Coordination::Exception"; } diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.cpp b/dbms/src/Common/ZooKeeper/TestKeeper.cpp index eb42c6d0464..7c7d9bba016 100644 --- a/dbms/src/Common/ZooKeeper/TestKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/TestKeeper.cpp @@ -418,8 +418,8 @@ ResponsePtr TestKeeperCheckRequest::createResponse() const { return std::make_sh ResponsePtr TestKeeperMultiRequest::createResponse() const { return std::make_shared(); } -TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout) - : root_path(root_path_), operation_timeout(operation_timeout) +TestKeeper::TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_) + : root_path(root_path_), operation_timeout(operation_timeout_) { container.emplace("/", Node()); diff --git a/dbms/src/Common/ZooKeeper/TestKeeper.h b/dbms/src/Common/ZooKeeper/TestKeeper.h index 6b26e4cf8a1..01c92c98778 100644 --- a/dbms/src/Common/ZooKeeper/TestKeeper.h +++ 
b/dbms/src/Common/ZooKeeper/TestKeeper.h @@ -33,7 +33,7 @@ using TestKeeperRequestPtr = std::shared_ptr; class TestKeeper : public IKeeper { public: - TestKeeper(const String & root_path, Poco::Timespan operation_timeout); + TestKeeper(const String & root_path_, Poco::Timespan operation_timeout_); ~TestKeeper() override; bool isExpired() const override { return expired; } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp index caebc59ce7f..f60085195ed 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.cpp @@ -106,10 +106,10 @@ void ZooKeeper::init(const std::string & implementation, const std::string & hos throw KeeperException("Zookeeper root doesn't exist. You should create root node " + chroot + " before start.", Coordination::ZNONODE); } -ZooKeeper::ZooKeeper(const std::string & hosts, const std::string & identity, int32_t session_timeout_ms, - int32_t operation_timeout_ms, const std::string & chroot, const std::string & implementation) +ZooKeeper::ZooKeeper(const std::string & hosts_, const std::string & identity_, int32_t session_timeout_ms_, + int32_t operation_timeout_ms_, const std::string & chroot_, const std::string & implementation) { - init(implementation, hosts, identity, session_timeout_ms, operation_timeout_ms, chroot); + init(implementation, hosts_, identity_, session_timeout_ms_, operation_timeout_ms_, chroot_); } struct ZooKeeperArgs @@ -891,9 +891,9 @@ size_t KeeperMultiException::getFailedOpIndex(int32_t exception_code, const Coor } -KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests, const Coordination::Responses & responses) +KeeperMultiException::KeeperMultiException(int32_t exception_code, const Coordination::Requests & requests_, const Coordination::Responses & responses_) : KeeperException("Transaction failed", exception_code), - requests(requests), responses(responses), 
failed_op_index(getFailedOpIndex(exception_code, responses)) + requests(requests_), responses(responses_), failed_op_index(getFailedOpIndex(exception_code, responses)) { addMessage("Op #" + std::to_string(failed_op_index) + ", path: " + getPathForFirstFailedOp()); } diff --git a/dbms/src/Common/ZooKeeper/ZooKeeper.h b/dbms/src/Common/ZooKeeper/ZooKeeper.h index a888759c134..5bae272102d 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeper.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeper.h @@ -52,10 +52,10 @@ class ZooKeeper public: using Ptr = std::shared_ptr; - ZooKeeper(const std::string & hosts, const std::string & identity = "", - int32_t session_timeout_ms = DEFAULT_SESSION_TIMEOUT, - int32_t operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT, - const std::string & chroot = "", + ZooKeeper(const std::string & hosts_, const std::string & identity_ = "", + int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT, + int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT, + const std::string & chroot_ = "", const std::string & implementation = "zookeeper"); /** Config of the form: diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp index 41e18b95fcf..be91d4b6d93 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.cpp @@ -758,17 +758,17 @@ struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse { ZooKeeper::OpNum op_num; bool done; - int32_t error; + int32_t error_; Coordination::read(op_num, in); Coordination::read(done, in); - Coordination::read(error, in); + Coordination::read(error_, in); if (!done) throw Exception("Too many results received for multi transaction", ZMARSHALLINGERROR); if (op_num != -1) throw Exception("Unexpected op_num received at the end of results for multi transaction", ZMARSHALLINGERROR); - if (error != -1) + if (error_ != -1) throw Exception("Unexpected error value received at the end of results for multi transaction", 
ZMARSHALLINGERROR); } } @@ -821,12 +821,12 @@ ZooKeeper::ZooKeeper( const String & root_path_, const String & auth_scheme, const String & auth_data, - Poco::Timespan session_timeout, + Poco::Timespan session_timeout_, Poco::Timespan connection_timeout, - Poco::Timespan operation_timeout) + Poco::Timespan operation_timeout_) : root_path(root_path_), - session_timeout(session_timeout), - operation_timeout(std::min(operation_timeout, session_timeout)) + session_timeout(session_timeout_), + operation_timeout(std::min(operation_timeout_, session_timeout_)) { if (!root_path.empty()) { diff --git a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h index 24868571d0d..1588c74b412 100644 --- a/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h +++ b/dbms/src/Common/ZooKeeper/ZooKeeperImpl.h @@ -108,9 +108,9 @@ public: const String & root_path, const String & auth_scheme, const String & auth_data, - Poco::Timespan session_timeout, + Poco::Timespan session_timeout_, Poco::Timespan connection_timeout, - Poco::Timespan operation_timeout); + Poco::Timespan operation_timeout_); ~ZooKeeper() override; diff --git a/dbms/src/Common/createHardLink.cpp b/dbms/src/Common/createHardLink.cpp index 824b7e99086..5ed82231b26 100644 --- a/dbms/src/Common/createHardLink.cpp +++ b/dbms/src/Common/createHardLink.cpp @@ -26,16 +26,19 @@ void createHardLink(const String & source_path, const String & destination_path) struct stat destination_descr; if (0 != lstat(source_path.c_str(), &source_descr)) - throwFromErrno("Cannot stat " + source_path, ErrorCodes::CANNOT_STAT); + throwFromErrnoWithPath("Cannot stat " + source_path, source_path, ErrorCodes::CANNOT_STAT); if (0 != lstat(destination_path.c_str(), &destination_descr)) - throwFromErrno("Cannot stat " + destination_path, ErrorCodes::CANNOT_STAT); + throwFromErrnoWithPath("Cannot stat " + destination_path, destination_path, ErrorCodes::CANNOT_STAT); if (source_descr.st_ino != destination_descr.st_ino) - 
throwFromErrno("Destination file " + destination_path + " is already exist and have different inode.", ErrorCodes::CANNOT_LINK, link_errno); + throwFromErrnoWithPath( + "Destination file " + destination_path + " is already exist and have different inode.", + destination_path, ErrorCodes::CANNOT_LINK, link_errno); } else - throwFromErrno("Cannot link " + source_path + " to " + destination_path, ErrorCodes::CANNOT_LINK); + throwFromErrnoWithPath("Cannot link " + source_path + " to " + destination_path, destination_path, + ErrorCodes::CANNOT_LINK); } } diff --git a/dbms/src/Common/hex.h b/dbms/src/Common/hex.h index f5ca4904b8c..81fa725e17d 100644 --- a/dbms/src/Common/hex.h +++ b/dbms/src/Common/hex.h @@ -42,7 +42,7 @@ inline void writeHexByteLowercase(UInt8 byte, void * out) /// Produces hex representation of an unsigned int with leading zeros (for checksums) template -inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) +inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table) { union { @@ -50,7 +50,7 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) UInt8 uint8[sizeof(TUInt)]; }; - value = uint; + value = uint_; /// Use little endian for (size_t i = 0; i < sizeof(TUInt); ++i) @@ -58,30 +58,30 @@ inline void writeHexUIntImpl(TUInt uint, char * out, const char * const table) } template -inline void writeHexUIntUppercase(TUInt uint, char * out) +inline void writeHexUIntUppercase(TUInt uint_, char * out) { - writeHexUIntImpl(uint, out, hex_byte_to_char_uppercase_table); + writeHexUIntImpl(uint_, out, hex_byte_to_char_uppercase_table); } template -inline void writeHexUIntLowercase(TUInt uint, char * out) +inline void writeHexUIntLowercase(TUInt uint_, char * out) { - writeHexUIntImpl(uint, out, hex_byte_to_char_lowercase_table); + writeHexUIntImpl(uint_, out, hex_byte_to_char_lowercase_table); } template -std::string getHexUIntUppercase(TUInt uint) +std::string getHexUIntUppercase(TUInt 
uint_) { std::string res(sizeof(TUInt) * 2, '\0'); - writeHexUIntUppercase(uint, res.data()); + writeHexUIntUppercase(uint_, res.data()); return res; } template -std::string getHexUIntLowercase(TUInt uint) +std::string getHexUIntLowercase(TUInt uint_) { std::string res(sizeof(TUInt) * 2, '\0'); - writeHexUIntLowercase(uint, res.data()); + writeHexUIntLowercase(uint_, res.data()); return res; } diff --git a/dbms/src/Common/tests/arena_with_free_lists.cpp b/dbms/src/Common/tests/arena_with_free_lists.cpp index 4d4915f5dcc..20a3e547da0 100644 --- a/dbms/src/Common/tests/arena_with_free_lists.cpp +++ b/dbms/src/Common/tests/arena_with_free_lists.cpp @@ -137,17 +137,17 @@ struct Dictionary enum class AttributeUnderlyingType { - UInt8, - UInt16, - UInt32, - UInt64, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, - String + utUInt8, + utUInt16, + utUInt32, + utUInt64, + utInt8, + utInt16, + utInt32, + utInt64, + utFloat32, + utFloat64, + utString }; struct Attribute final @@ -172,17 +172,17 @@ struct Dictionary { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float64: std::get>(attribute.arrays)[idx] = value.get(); 
break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = std::get>(attribute.arrays)[idx]; @@ -308,7 +308,7 @@ int main(int argc, char ** argv) constexpr size_t cache_size = 1024; Dictionary::Attribute attr; - attr.type = Dictionary::AttributeUnderlyingType::String; + attr.type = Dictionary::AttributeUnderlyingType::utString; std::get>(attr.arrays).reset(new StringRef[cache_size]{}); while (true) diff --git a/dbms/src/Common/tests/cow_columns.cpp b/dbms/src/Common/tests/cow_columns.cpp index dad2ba13de5..7b629e264e5 100644 --- a/dbms/src/Common/tests/cow_columns.cpp +++ b/dbms/src/Common/tests/cow_columns.cpp @@ -28,7 +28,7 @@ private: friend class COWHelper; int data; - ConcreteColumn(int data) : data(data) {} + ConcreteColumn(int data_) : data(data_) {} ConcreteColumn(const ConcreteColumn &) = default; MutableColumnPtr test() const override diff --git a/dbms/src/Common/tests/cow_compositions.cpp 
b/dbms/src/Common/tests/cow_compositions.cpp index a48624d7d64..8d0110a0290 100644 --- a/dbms/src/Common/tests/cow_compositions.cpp +++ b/dbms/src/Common/tests/cow_compositions.cpp @@ -30,7 +30,7 @@ private: friend class COWHelper; int data; - ConcreteColumn(int data) : data(data) {} + ConcreteColumn(int data_) : data(data_) {} ConcreteColumn(const ConcreteColumn &) = default; public: diff --git a/dbms/src/Common/tests/mi_malloc_test.cpp b/dbms/src/Common/tests/mi_malloc_test.cpp index d9ee75fba6e..ce1e4a3a770 100644 --- a/dbms/src/Common/tests/mi_malloc_test.cpp +++ b/dbms/src/Common/tests/mi_malloc_test.cpp @@ -58,8 +58,8 @@ struct Allocation Allocation() {} - Allocation(size_t size) - : size(size) + Allocation(size_t size_) + : size(size_) { ptr = malloc(size); if (!ptr) diff --git a/dbms/src/Compression/CompressionCodecMultiple.cpp b/dbms/src/Compression/CompressionCodecMultiple.cpp index a40fbafefc9..23c244f4dcb 100644 --- a/dbms/src/Compression/CompressionCodecMultiple.cpp +++ b/dbms/src/Compression/CompressionCodecMultiple.cpp @@ -18,8 +18,8 @@ extern const int UNKNOWN_CODEC; extern const int CORRUPTED_DATA; } -CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs) - : codecs(codecs) +CompressionCodecMultiple::CompressionCodecMultiple(Codecs codecs_) + : codecs(codecs_) { } diff --git a/dbms/src/Compression/CompressionCodecMultiple.h b/dbms/src/Compression/CompressionCodecMultiple.h index 3770266e915..8702a7ab538 100644 --- a/dbms/src/Compression/CompressionCodecMultiple.h +++ b/dbms/src/Compression/CompressionCodecMultiple.h @@ -9,7 +9,7 @@ class CompressionCodecMultiple final : public ICompressionCodec { public: CompressionCodecMultiple() = default; - explicit CompressionCodecMultiple(Codecs codecs); + explicit CompressionCodecMultiple(Codecs codecs_); UInt8 getMethodByte() const override; diff --git a/dbms/src/Compression/LZ4_decompress_faster.cpp b/dbms/src/Compression/LZ4_decompress_faster.cpp index 65ffdb2173b..77f9e226de4 100644 --- 
a/dbms/src/Compression/LZ4_decompress_faster.cpp +++ b/dbms/src/Compression/LZ4_decompress_faster.cpp @@ -537,7 +537,6 @@ void decompress( if (source_size == 0 || dest_size == 0) return; - /// Don't run timer if the block is too small. if (dest_size >= 32768) { diff --git a/dbms/src/Compression/LZ4_decompress_faster.h b/dbms/src/Compression/LZ4_decompress_faster.h index ff29c205276..dd923279ebf 100644 --- a/dbms/src/Compression/LZ4_decompress_faster.h +++ b/dbms/src/Compression/LZ4_decompress_faster.h @@ -123,7 +123,7 @@ struct PerformanceStatistics } PerformanceStatistics() {} - PerformanceStatistics(ssize_t choose_method) : choose_method(choose_method) {} + PerformanceStatistics(ssize_t choose_method_) : choose_method(choose_method_) {} }; diff --git a/dbms/src/Compression/tests/gtest_compressionCodec.cpp b/dbms/src/Compression/tests/gtest_compressionCodec.cpp index 2b2f6927ed4..0f03070fff3 100644 --- a/dbms/src/Compression/tests/gtest_compressionCodec.cpp +++ b/dbms/src/Compression/tests/gtest_compressionCodec.cpp @@ -334,10 +334,10 @@ auto SequentialGenerator = [](auto stride = 1) template struct MonotonicGenerator { - MonotonicGenerator(T stride = 1, size_t max_step = 10) + MonotonicGenerator(T stride_ = 1, size_t max_step_ = 10) : prev_value(0), - stride(stride), - max_step(max_step) + stride(stride_), + max_step(max_step_) {} template @@ -369,9 +369,9 @@ auto MinMaxGenerator = [](auto i) template struct RandomGenerator { - RandomGenerator(T seed = 0, T value_cap = std::numeric_limits::max()) + RandomGenerator(T seed = 0, T value_cap_ = std::numeric_limits::max()) : e(seed), - value_cap(value_cap) + value_cap(value_cap_) { } diff --git a/dbms/src/Core/BackgroundSchedulePool.cpp b/dbms/src/Core/BackgroundSchedulePool.cpp index ee63fdbadff..732be068569 100644 --- a/dbms/src/Core/BackgroundSchedulePool.cpp +++ b/dbms/src/Core/BackgroundSchedulePool.cpp @@ -23,7 +23,7 @@ namespace DB class TaskNotification final : public Poco::Notification { public: - explicit 
TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task) : task(task) {} + explicit TaskNotification(const BackgroundSchedulePoolTaskInfoPtr & task_) : task(task_) {} void execute() { task->execute(); } private: @@ -155,8 +155,8 @@ Coordination::WatchCallback BackgroundSchedulePoolTaskInfo::getWatchCallback() } -BackgroundSchedulePool::BackgroundSchedulePool(size_t size) - : size(size) +BackgroundSchedulePool::BackgroundSchedulePool(size_t size_) + : size(size_) { LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads"); diff --git a/dbms/src/Core/BackgroundSchedulePool.h b/dbms/src/Core/BackgroundSchedulePool.h index f2627366da7..5a1f6489a7b 100644 --- a/dbms/src/Core/BackgroundSchedulePool.h +++ b/dbms/src/Core/BackgroundSchedulePool.h @@ -49,7 +49,7 @@ public: size_t getNumberOfThreads() const { return size; } - BackgroundSchedulePool(size_t size); + BackgroundSchedulePool(size_t size_); ~BackgroundSchedulePool(); private: diff --git a/dbms/src/Core/ExternalResultDescription.cpp b/dbms/src/Core/ExternalResultDescription.cpp index c41d8486552..8ca6795d2e9 100644 --- a/dbms/src/Core/ExternalResultDescription.cpp +++ b/dbms/src/Core/ExternalResultDescription.cpp @@ -33,33 +33,33 @@ void ExternalResultDescription::init(const Block & sample_block_) const IDataType * type = type_not_nullable.get(); if (typeid_cast(type)) - types.emplace_back(ValueType::UInt8, is_nullable); + types.emplace_back(ValueType::vtUInt8, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt16, is_nullable); + types.emplace_back(ValueType::vtUInt16, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt32, is_nullable); + types.emplace_back(ValueType::vtUInt32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UInt64, is_nullable); + types.emplace_back(ValueType::vtUInt64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int8, 
is_nullable); + types.emplace_back(ValueType::vtInt8, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int16, is_nullable); + types.emplace_back(ValueType::vtInt16, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int32, is_nullable); + types.emplace_back(ValueType::vtInt32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Int64, is_nullable); + types.emplace_back(ValueType::vtInt64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Float32, is_nullable); + types.emplace_back(ValueType::vtFloat32, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Float64, is_nullable); + types.emplace_back(ValueType::vtFloat64, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::String, is_nullable); + types.emplace_back(ValueType::vtString, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::Date, is_nullable); + types.emplace_back(ValueType::vtDate, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::DateTime, is_nullable); + types.emplace_back(ValueType::vtDateTime, is_nullable); else if (typeid_cast(type)) - types.emplace_back(ValueType::UUID, is_nullable); + types.emplace_back(ValueType::vtUUID, is_nullable); else throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE}; } diff --git a/dbms/src/Core/ExternalResultDescription.h b/dbms/src/Core/ExternalResultDescription.h index c59104019b7..0bd77afa628 100644 --- a/dbms/src/Core/ExternalResultDescription.h +++ b/dbms/src/Core/ExternalResultDescription.h @@ -12,20 +12,20 @@ struct ExternalResultDescription { enum struct ValueType { - UInt8, - UInt16, - UInt32, - UInt64, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, - String, - Date, - DateTime, - UUID, + vtUInt8, + vtUInt16, + vtUInt32, + vtUInt64, + vtInt8, + vtInt16, + vtInt32, + vtInt64, + vtFloat32, + vtFloat64, + vtString, + vtDate, + vtDateTime, + 
vtUUID, }; Block sample_block; diff --git a/dbms/src/Core/MySQLProtocol.h b/dbms/src/Core/MySQLProtocol.h index ccd127352ed..b7a8d514bc2 100644 --- a/dbms/src/Core/MySQLProtocol.h +++ b/dbms/src/Core/MySQLProtocol.h @@ -137,10 +137,10 @@ public: class PacketPayloadReadBuffer : public ReadBuffer { public: - PacketPayloadReadBuffer(ReadBuffer & in, uint8_t & sequence_id) - : ReadBuffer(in.position(), 0) // not in.buffer().begin(), because working buffer may include previous packet - , in(in) - , sequence_id(sequence_id) + PacketPayloadReadBuffer(ReadBuffer & in_, uint8_t & sequence_id_) + : ReadBuffer(in_.position(), 0) // not in.buffer().begin(), because working buffer may include previous packet + , in(in_) + , sequence_id(sequence_id_) { } @@ -245,8 +245,8 @@ public: class PacketPayloadWriteBuffer : public WriteBuffer { public: - PacketPayloadWriteBuffer(WriteBuffer & out, size_t payload_length, uint8_t & sequence_id) - : WriteBuffer(out.position(), 0), out(out), sequence_id(sequence_id), total_left(payload_length) + PacketPayloadWriteBuffer(WriteBuffer & out_, size_t payload_length_, uint8_t & sequence_id_) + : WriteBuffer(out_.position(), 0), out(out_), sequence_id(sequence_id_), total_left(payload_length_) { startNewPacket(); setWorkingBuffer(); @@ -347,18 +347,18 @@ public: size_t max_packet_size = MAX_PACKET_LENGTH; /// For reading and writing. - PacketSender(ReadBuffer & in, WriteBuffer & out, uint8_t & sequence_id) - : sequence_id(sequence_id) - , in(&in) - , out(&out) + PacketSender(ReadBuffer & in_, WriteBuffer & out_, uint8_t & sequence_id_) + : sequence_id(sequence_id_) + , in(&in_) + , out(&out_) { } /// For writing. 
- PacketSender(WriteBuffer & out, uint8_t & sequence_id) - : sequence_id(sequence_id) + PacketSender(WriteBuffer & out_, uint8_t & sequence_id_) + : sequence_id(sequence_id_) , in(nullptr) - , out(&out) + , out(&out_) { } @@ -421,15 +421,15 @@ class Handshake : public WritePacket String auth_plugin_name; String auth_plugin_data; public: - explicit Handshake(uint32_t capability_flags, uint32_t connection_id, String server_version, String auth_plugin_name, String auth_plugin_data) + explicit Handshake(uint32_t capability_flags_, uint32_t connection_id_, String server_version_, String auth_plugin_name_, String auth_plugin_data_) : protocol_version(0xa) - , server_version(std::move(server_version)) - , connection_id(connection_id) - , capability_flags(capability_flags) + , server_version(std::move(server_version_)) + , connection_id(connection_id_) + , capability_flags(capability_flags_) , character_set(CharacterSet::utf8_general_ci) , status_flags(0) - , auth_plugin_name(std::move(auth_plugin_name)) - , auth_plugin_data(std::move(auth_plugin_data)) + , auth_plugin_name(std::move(auth_plugin_name_)) + , auth_plugin_data(std::move(auth_plugin_data_)) { } @@ -532,8 +532,8 @@ class AuthSwitchRequest : public WritePacket String plugin_name; String auth_plugin_data; public: - AuthSwitchRequest(String plugin_name, String auth_plugin_data) - : plugin_name(std::move(plugin_name)), auth_plugin_data(std::move(auth_plugin_data)) + AuthSwitchRequest(String plugin_name_, String auth_plugin_data_) + : plugin_name(std::move(plugin_name_)), auth_plugin_data(std::move(auth_plugin_data_)) { } @@ -566,7 +566,7 @@ class AuthMoreData : public WritePacket { String data; public: - explicit AuthMoreData(String data): data(std::move(data)) {} + explicit AuthMoreData(String data_): data(std::move(data_)) {} protected: size_t getPayloadSize() const override @@ -592,20 +592,20 @@ class OK_Packet : public WritePacket String session_state_changes; String info; public: - OK_Packet(uint8_t header, - 
uint32_t capabilities, - uint64_t affected_rows, - uint32_t status_flags, - int16_t warnings, - String session_state_changes = "", - String info = "") - : header(header) - , capabilities(capabilities) - , affected_rows(affected_rows) - , warnings(warnings) - , status_flags(status_flags) - , session_state_changes(std::move(session_state_changes)) - , info(std::move(info)) + OK_Packet(uint8_t header_, + uint32_t capabilities_, + uint64_t affected_rows_, + uint32_t status_flags_, + int16_t warnings_, + String session_state_changes_ = "", + String info_ = "") + : header(header_) + , capabilities(capabilities_) + , affected_rows(affected_rows_) + , warnings(warnings_) + , status_flags(status_flags_) + , session_state_changes(std::move(session_state_changes_)) + , info(std::move(info_)) { } @@ -671,7 +671,7 @@ class EOF_Packet : public WritePacket int warnings; int status_flags; public: - EOF_Packet(int warnings, int status_flags) : warnings(warnings), status_flags(status_flags) + EOF_Packet(int warnings_, int status_flags_) : warnings(warnings_), status_flags(status_flags_) {} protected: @@ -694,8 +694,8 @@ class ERR_Packet : public WritePacket String sql_state; String error_message; public: - ERR_Packet(int error_code, String sql_state, String error_message) - : error_code(error_code), sql_state(std::move(sql_state)), error_message(std::move(error_message)) + ERR_Packet(int error_code_, String sql_state_, String error_message_) + : error_code(error_code_), sql_state(std::move(sql_state_)), error_message(std::move(error_message_)) { } @@ -730,32 +730,32 @@ class ColumnDefinition : public WritePacket uint8_t decimals = 0x00; public: ColumnDefinition( - String schema, - String table, - String org_table, - String name, - String org_name, - uint16_t character_set, - uint32_t column_length, - ColumnType column_type, - uint16_t flags, - uint8_t decimals) + String schema_, + String table_, + String org_table_, + String name_, + String org_name_, + uint16_t character_set_, + 
uint32_t column_length_, + ColumnType column_type_, + uint16_t flags_, + uint8_t decimals_) - : schema(std::move(schema)), table(std::move(table)), org_table(std::move(org_table)), name(std::move(name)), - org_name(std::move(org_name)), character_set(character_set), column_length(column_length), column_type(column_type), flags(flags), - decimals(decimals) + : schema(std::move(schema_)), table(std::move(table_)), org_table(std::move(org_table_)), name(std::move(name_)), + org_name(std::move(org_name_)), character_set(character_set_), column_length(column_length_), column_type(column_type_), flags(flags_), + decimals(decimals_) { } /// Should be used when column metadata (original name, table, original table, database) is unknown. ColumnDefinition( - String name, - uint16_t character_set, - uint32_t column_length, - ColumnType column_type, - uint16_t flags, - uint8_t decimals) - : ColumnDefinition("", "", "", std::move(name), "", character_set, column_length, column_type, flags, decimals) + String name_, + uint16_t character_set_, + uint32_t column_length_, + ColumnType column_type_, + uint16_t flags_, + uint8_t decimals_) + : ColumnDefinition("", "", "", std::move(name_), "", character_set_, column_length_, column_type_, flags_, decimals_) { } @@ -801,7 +801,7 @@ class LengthEncodedNumber : public WritePacket { uint64_t value; public: - explicit LengthEncodedNumber(uint64_t value): value(value) + explicit LengthEncodedNumber(uint64_t value_): value(value_) { } diff --git a/dbms/src/Core/NamesAndTypes.cpp b/dbms/src/Core/NamesAndTypes.cpp index 09985d97463..15c2be689bf 100644 --- a/dbms/src/Core/NamesAndTypes.cpp +++ b/dbms/src/Core/NamesAndTypes.cpp @@ -26,16 +26,20 @@ void NamesAndTypesList::readText(ReadBuffer & buf) size_t count; DB::readText(count, buf); assertString(" columns:\n", buf); - resize(count); - for (NameAndTypePair & it : *this) + + String column_name; + String type_name; + for (size_t i = 0; i < count; ++i) { - 
readBackQuotedStringWithSQLStyle(it.name, buf); + readBackQuotedStringWithSQLStyle(column_name, buf); assertChar(' ', buf); - String type_name; readString(type_name, buf); - it.type = data_type_factory.get(type_name); assertChar('\n', buf); + + emplace_back(column_name, data_type_factory.get(type_name)); } + + assertEOF(buf); } void NamesAndTypesList::writeText(WriteBuffer & buf) const diff --git a/dbms/src/Core/Settings.h b/dbms/src/Core/Settings.h index ea437832749..c4f374fbc70 100644 --- a/dbms/src/Core/Settings.h +++ b/dbms/src/Core/Settings.h @@ -200,6 +200,7 @@ struct Settings : public SettingsCollection M(SettingBool, join_use_nulls, 0, "Use NULLs for non-joined rows of outer JOINs for types that can be inside Nullable. If false, use default value of corresponding columns data type.") \ \ M(SettingJoinStrictness, join_default_strictness, JoinStrictness::ALL, "Set default strictness in JOIN query. Possible values: empty string, 'ANY', 'ALL'. If empty, query without strictness will throw exception.") \ + M(SettingBool, any_join_distinct_right_table_keys, false, "Enable old ANY JOIN logic with many-to-one left-to-right table keys mapping for all ANY JOINs. It leads to confusing not equal results for 't1 ANY LEFT JOIN t2' and 't2 ANY RIGHT JOIN t1'. 
ANY RIGHT JOIN needs one-to-many keys maping to be consistent with LEFT one.") \ \ M(SettingUInt64, preferred_block_size_bytes, 1000000, "") \ \ diff --git a/dbms/src/Core/Types.h b/dbms/src/Core/Types.h index 75c7cbaff66..81446180cdc 100644 --- a/dbms/src/Core/Types.h +++ b/dbms/src/Core/Types.h @@ -12,6 +12,41 @@ namespace DB struct Null {}; +enum class TypeIndex +{ + Nothing = 0, + UInt8, + UInt16, + UInt32, + UInt64, + UInt128, + Int8, + Int16, + Int32, + Int64, + Int128, + Float32, + Float64, + Date, + DateTime, + String, + FixedString, + Enum8, + Enum16, + Decimal32, + Decimal64, + Decimal128, + UUID, + Array, + Tuple, + Set, + Interval, + Nullable, + Function, + AggregateFunction, + LowCardinality, +}; + using UInt8 = uint8_t; using UInt16 = uint16_t; using UInt32 = uint32_t; @@ -57,41 +92,6 @@ template <> struct TypeName { static const char * get() { return "Float template <> struct TypeName { static const char * get() { return "Float64"; } }; template <> struct TypeName { static const char * get() { return "String"; } }; -enum class TypeIndex -{ - Nothing = 0, - UInt8, - UInt16, - UInt32, - UInt64, - UInt128, - Int8, - Int16, - Int32, - Int64, - Int128, - Float32, - Float64, - Date, - DateTime, - String, - FixedString, - Enum8, - Enum16, - Decimal32, - Decimal64, - Decimal128, - UUID, - Array, - Tuple, - Set, - Interval, - Nullable, - Function, - AggregateFunction, - LowCardinality, -}; - template struct TypeId; template <> struct TypeId { static constexpr const TypeIndex value = TypeIndex::UInt8; }; template <> struct TypeId { static constexpr const TypeIndex value = TypeIndex::UInt16; }; diff --git a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h b/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h index af049cce7c0..8cfc4cbeab4 100644 --- a/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h +++ b/dbms/src/DataStreams/CollapsingFinalBlockInputStream.h @@ -45,9 +45,9 @@ private: MergingBlock(const Block & block_, size_t stream_index_, 
const SortDescription & desc, - const String & sign_column_name, - BlockPlainPtrs * output_blocks) - : block(block_), stream_index(stream_index_), output_blocks(output_blocks) + const String & sign_column_name_, + BlockPlainPtrs * output_blocks_) + : block(block_), stream_index(stream_index_), output_blocks(output_blocks_) { sort_columns.resize(desc.size()); for (size_t i = 0; i < desc.size(); ++i) @@ -59,7 +59,7 @@ private: sort_columns[i] = block.safeGetByPosition(column_number).column.get(); } - const IColumn * sign_icolumn = block.getByName(sign_column_name).column.get(); + const IColumn * sign_icolumn = block.getByName(sign_column_name_).column.get(); sign_column = typeid_cast(sign_icolumn); diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.cpp b/dbms/src/DataStreams/DistinctBlockInputStream.cpp index 77ea87f1be3..54d7134d0cd 100644 --- a/dbms/src/DataStreams/DistinctBlockInputStream.cpp +++ b/dbms/src/DataStreams/DistinctBlockInputStream.cpp @@ -8,10 +8,10 @@ namespace ErrorCodes extern const int SET_SIZE_LIMIT_EXCEEDED; } -DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns) - : columns_names(columns) +DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_) + : columns_names(columns_) , limit_hint(limit_hint_) - , set_size_limits(set_size_limits) + , set_size_limits(set_size_limits_) { children.push_back(input); } diff --git a/dbms/src/DataStreams/DistinctBlockInputStream.h b/dbms/src/DataStreams/DistinctBlockInputStream.h index 3eb7c5ffcb0..4df0bf46070 100644 --- a/dbms/src/DataStreams/DistinctBlockInputStream.h +++ b/dbms/src/DataStreams/DistinctBlockInputStream.h @@ -17,7 +17,7 @@ class DistinctBlockInputStream : public IBlockInputStream { public: /// Empty columns_ means all collumns. 
- DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns); + DistinctBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns_); String getName() const override { return "Distinct"; } diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp b/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp index 2b715f64823..e8e9f7278aa 100644 --- a/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/DistinctSortedBlockInputStream.cpp @@ -9,11 +9,11 @@ namespace ErrorCodes } DistinctSortedBlockInputStream::DistinctSortedBlockInputStream( - const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns) + const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns) : description(input->getSortDescription()) , columns_names(columns) , limit_hint(limit_hint_) - , set_size_limits(set_size_limits) + , set_size_limits(set_size_limits_) { children.push_back(input); } diff --git a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h b/dbms/src/DataStreams/DistinctSortedBlockInputStream.h index 9ecc053feb5..dbccb892b3f 100644 --- a/dbms/src/DataStreams/DistinctSortedBlockInputStream.h +++ b/dbms/src/DataStreams/DistinctSortedBlockInputStream.h @@ -21,7 +21,7 @@ class DistinctSortedBlockInputStream : public IBlockInputStream { public: /// Empty columns_ means all collumns. 
- DistinctSortedBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits, UInt64 limit_hint_, const Names & columns); + DistinctSortedBlockInputStream(const BlockInputStreamPtr & input, const SizeLimits & set_size_limits_, UInt64 limit_hint_, const Names & columns); String getName() const override { return "DistinctSorted"; } diff --git a/dbms/src/DataStreams/FilterBlockInputStream.cpp b/dbms/src/DataStreams/FilterBlockInputStream.cpp index 63782890331..8613bc8cf8f 100644 --- a/dbms/src/DataStreams/FilterBlockInputStream.cpp +++ b/dbms/src/DataStreams/FilterBlockInputStream.cpp @@ -18,8 +18,8 @@ namespace ErrorCodes FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, - const String & filter_column_name, bool remove_filter) - : remove_filter(remove_filter), expression(expression_) + const String & filter_column_name, bool remove_filter_) + : remove_filter(remove_filter_), expression(expression_) { children.push_back(input); diff --git a/dbms/src/DataStreams/FilterBlockInputStream.h b/dbms/src/DataStreams/FilterBlockInputStream.h index 9bee8a50c8a..e287b69460c 100644 --- a/dbms/src/DataStreams/FilterBlockInputStream.h +++ b/dbms/src/DataStreams/FilterBlockInputStream.h @@ -21,7 +21,7 @@ private: public: FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, - const String & filter_column_name_, bool remove_filter = false); + const String & filter_column_name_, bool remove_filter_ = false); String getName() const override; Block getTotals() override; diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp index fb24d8c37a4..456c43ca802 100644 --- a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.cpp @@ -14,9 +14,9 @@ namespace ErrorCodes 
GraphiteRollupSortedBlockInputStream::GraphiteRollupSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_, - const Graphite::Params & params, time_t time_of_merge) + const Graphite::Params & params_, time_t time_of_merge_) : MergingSortedBlockInputStream(inputs_, description_, max_block_size_), - params(params), time_of_merge(time_of_merge) + params(params_), time_of_merge(time_of_merge_) { size_t max_size_of_aggregate_state = 0; size_t max_alignment_of_aggregate_state = 1; diff --git a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h index 00bd2f4b67e..560274f1dae 100644 --- a/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h +++ b/dbms/src/DataStreams/GraphiteRollupSortedBlockInputStream.h @@ -152,7 +152,7 @@ class GraphiteRollupSortedBlockInputStream : public MergingSortedBlockInputStrea public: GraphiteRollupSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, size_t max_block_size_, - const Graphite::Params & params, time_t time_of_merge); + const Graphite::Params & params_, time_t time_of_merge_); String getName() const override { return "GraphiteRollupSorted"; } diff --git a/dbms/src/DataStreams/IBlockInputStream.h b/dbms/src/DataStreams/IBlockInputStream.h index b9e55763d4c..309eecbeebf 100644 --- a/dbms/src/DataStreams/IBlockInputStream.h +++ b/dbms/src/DataStreams/IBlockInputStream.h @@ -128,7 +128,7 @@ public: virtual Block getTotals(); /// The same for minimums and maximums. - Block getExtremes(); + virtual Block getExtremes(); /** Set the execution progress bar callback. 
diff --git a/dbms/src/DataStreams/MaterializingBlockOutputStream.h b/dbms/src/DataStreams/MaterializingBlockOutputStream.h index 9e1efeb29d3..64c2bc12a57 100644 --- a/dbms/src/DataStreams/MaterializingBlockOutputStream.h +++ b/dbms/src/DataStreams/MaterializingBlockOutputStream.h @@ -12,8 +12,8 @@ namespace DB class MaterializingBlockOutputStream : public IBlockOutputStream { public: - MaterializingBlockOutputStream(const BlockOutputStreamPtr & output, const Block & header) - : output{output}, header(header) {} + MaterializingBlockOutputStream(const BlockOutputStreamPtr & output_, const Block & header_) + : output{output_}, header(header_) {} Block getHeader() const override { return header; } void write(const Block & block) override { output->write(materializeBlock(block)); } diff --git a/dbms/src/DataStreams/MergeSortingBlockInputStream.h b/dbms/src/DataStreams/MergeSortingBlockInputStream.h index e256e575a8e..9f257b82260 100644 --- a/dbms/src/DataStreams/MergeSortingBlockInputStream.h +++ b/dbms/src/DataStreams/MergeSortingBlockInputStream.h @@ -117,8 +117,8 @@ private: CompressedReadBuffer compressed_in; BlockInputStreamPtr block_in; - TemporaryFileStream(const std::string & path, const Block & header) - : file_in(path), compressed_in(file_in), block_in(std::make_shared(compressed_in, header, 0)) {} + TemporaryFileStream(const std::string & path, const Block & header_) + : file_in(path), compressed_in(file_in), block_in(std::make_shared(compressed_in, header_, 0)) {} }; std::vector> temporary_inputs; diff --git a/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h b/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h index 961f2b77bca..8ef64cc5e05 100644 --- a/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h +++ b/dbms/src/DataStreams/NullAndDoCopyBlockInputStream.h @@ -32,7 +32,9 @@ public: String getName() const override { return "NullAndDoCopy"; } - Block getHeader() const override { return input->getHeader(); } + Block getHeader() const override { 
return {}; } + Block getTotals() override { return {}; } + Block getExtremes() override { return {}; } protected: Block readImpl() override diff --git a/dbms/src/DataStreams/NullBlockOutputStream.h b/dbms/src/DataStreams/NullBlockOutputStream.h index 3d437527960..8b3e61d35a7 100644 --- a/dbms/src/DataStreams/NullBlockOutputStream.h +++ b/dbms/src/DataStreams/NullBlockOutputStream.h @@ -11,7 +11,7 @@ namespace DB class NullBlockOutputStream : public IBlockOutputStream { public: - NullBlockOutputStream(const Block & header) : header(header) {} + NullBlockOutputStream(const Block & header_) : header(header_) {} Block getHeader() const override { return header; } void write(const Block &) override {} diff --git a/dbms/src/DataStreams/OwningBlockInputStream.h b/dbms/src/DataStreams/OwningBlockInputStream.h index b7ea121814c..dac42028cd7 100644 --- a/dbms/src/DataStreams/OwningBlockInputStream.h +++ b/dbms/src/DataStreams/OwningBlockInputStream.h @@ -14,8 +14,8 @@ template class OwningBlockInputStream : public IBlockInputStream { public: - OwningBlockInputStream(const BlockInputStreamPtr & stream, std::unique_ptr own) - : stream{stream}, own{std::move(own)} + OwningBlockInputStream(const BlockInputStreamPtr & stream_, std::unique_ptr own_) + : stream{stream_}, own{std::move(own_)} { children.push_back(stream); } diff --git a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h index 52852526935..5342c03e68f 100644 --- a/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h +++ b/dbms/src/DataStreams/ParallelAggregatingBlockInputStream.h @@ -84,11 +84,11 @@ private: ColumnRawPtrs key_columns; Aggregator::AggregateColumns aggregate_columns; - ThreadData(size_t keys_size, size_t aggregates_size) + ThreadData(size_t keys_size_, size_t aggregates_size_) { - key.resize(keys_size); - key_columns.resize(keys_size); - aggregate_columns.resize(aggregates_size); + key.resize(keys_size_); + 
key_columns.resize(keys_size_); + aggregate_columns.resize(aggregates_size_); } }; diff --git a/dbms/src/DataStreams/SizeLimits.h b/dbms/src/DataStreams/SizeLimits.h index 66373a179ab..41238087613 100644 --- a/dbms/src/DataStreams/SizeLimits.h +++ b/dbms/src/DataStreams/SizeLimits.h @@ -27,8 +27,8 @@ struct SizeLimits OverflowMode overflow_mode = OverflowMode::THROW; SizeLimits() {} - SizeLimits(UInt64 max_rows, UInt64 max_bytes, OverflowMode overflow_mode) - : max_rows(max_rows), max_bytes(max_bytes), overflow_mode(overflow_mode) {} + SizeLimits(UInt64 max_rows_, UInt64 max_bytes_, OverflowMode overflow_mode_) + : max_rows(max_rows_), max_bytes(max_bytes_), overflow_mode(overflow_mode_) {} /// Check limits. If exceeded, return false or throw an exception, depending on overflow_mode. bool check(UInt64 rows, UInt64 bytes, const char * what, int exception_code) const; diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp b/dbms/src/DataStreams/SquashingBlockOutputStream.cpp index 48156ed090f..5d0638c1c57 100644 --- a/dbms/src/DataStreams/SquashingBlockOutputStream.cpp +++ b/dbms/src/DataStreams/SquashingBlockOutputStream.cpp @@ -4,8 +4,8 @@ namespace DB { -SquashingBlockOutputStream::SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header, size_t min_block_size_rows, size_t min_block_size_bytes) - : output(std::move(dst)), header(std::move(header)), transform(min_block_size_rows, min_block_size_bytes) +SquashingBlockOutputStream::SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header_, size_t min_block_size_rows, size_t min_block_size_bytes) + : output(std::move(dst)), header(std::move(header_)), transform(min_block_size_rows, min_block_size_bytes) { } diff --git a/dbms/src/DataStreams/SquashingBlockOutputStream.h b/dbms/src/DataStreams/SquashingBlockOutputStream.h index f255d18e331..7828ad7e96d 100644 --- a/dbms/src/DataStreams/SquashingBlockOutputStream.h +++ b/dbms/src/DataStreams/SquashingBlockOutputStream.h @@ -12,7 +12,7 @@ 
namespace DB class SquashingBlockOutputStream : public IBlockOutputStream { public: - SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header, size_t min_block_size_rows, size_t min_block_size_bytes); + SquashingBlockOutputStream(BlockOutputStreamPtr dst, Block header_, size_t min_block_size_rows, size_t min_block_size_bytes); Block getHeader() const override { return header; } void write(const Block & block) override; diff --git a/dbms/src/DataStreams/SquashingTransform.cpp b/dbms/src/DataStreams/SquashingTransform.cpp index abac72e79bd..00e3a51582c 100644 --- a/dbms/src/DataStreams/SquashingTransform.cpp +++ b/dbms/src/DataStreams/SquashingTransform.cpp @@ -4,8 +4,8 @@ namespace DB { -SquashingTransform::SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes) - : min_block_size_rows(min_block_size_rows), min_block_size_bytes(min_block_size_bytes) +SquashingTransform::SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_) + : min_block_size_rows(min_block_size_rows_), min_block_size_bytes(min_block_size_bytes_) { } diff --git a/dbms/src/DataStreams/SquashingTransform.h b/dbms/src/DataStreams/SquashingTransform.h index 519c0e1ae4b..f1681c57c8c 100644 --- a/dbms/src/DataStreams/SquashingTransform.h +++ b/dbms/src/DataStreams/SquashingTransform.h @@ -23,7 +23,7 @@ class SquashingTransform { public: /// Conditions on rows and bytes are OR-ed. If one of them is zero, then corresponding condition is ignored. - SquashingTransform(size_t min_block_size_rows, size_t min_block_size_bytes); + SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_); /// When not ready, you need to pass more blocks to add function. 
struct Result @@ -32,7 +32,7 @@ public: MutableColumns columns; Result(bool ready_) : ready(ready_) {} - Result(MutableColumns && columns) : ready(true), columns(std::move(columns)) {} + Result(MutableColumns && columns_) : ready(true), columns(std::move(columns_)) {} }; /** Add next block and possibly returns squashed block. diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp index a2c00e18acb..b53b7529f95 100644 --- a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp @@ -366,7 +366,7 @@ static DataTypePtr create(const ASTPtr & arguments) params_row[i] = literal->value; } } - else if (auto opt_name = getIdentifierName(arguments->children[0])) + else if (auto opt_name = tryGetIdentifierName(arguments->children[0])) { function_name = *opt_name; } diff --git a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp b/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp index 2cb0f87facd..8151ccf1948 100644 --- a/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp +++ b/dbms/src/DataTypes/DataTypeCustomSimpleAggregateFunction.cpp @@ -88,7 +88,7 @@ static std::pair create(const ASTPtr & argum params_row[i] = lit->value; } } - else if (auto opt_name = getIdentifierName(arguments->children[0])) + else if (auto opt_name = tryGetIdentifierName(arguments->children[0])) { function_name = *opt_name; } diff --git a/dbms/src/DataTypes/DataTypeInterval.h b/dbms/src/DataTypes/DataTypeInterval.h index 22f088c01f8..fa99ac430b6 100644 --- a/dbms/src/DataTypes/DataTypeInterval.h +++ b/dbms/src/DataTypes/DataTypeInterval.h @@ -54,7 +54,7 @@ public: __builtin_unreachable(); } - DataTypeInterval(Kind kind) : kind(kind) {} + DataTypeInterval(Kind kind_) : kind(kind_) {} std::string doGetName() const override { return std::string("Interval") + kindToString(); } const char * getFamilyName() const override { return "Interval"; } diff --git 
a/dbms/src/DataTypes/DataTypeLowCardinality.cpp b/dbms/src/DataTypes/DataTypeLowCardinality.cpp index 33d3eb658d3..812c5d04032 100644 --- a/dbms/src/DataTypes/DataTypeLowCardinality.cpp +++ b/dbms/src/DataTypes/DataTypeLowCardinality.cpp @@ -140,11 +140,11 @@ struct IndexesSerializationType } IndexesSerializationType(const IColumn & column, - bool has_additional_keys, - bool need_global_dictionary, + bool has_additional_keys_, + bool need_global_dictionary_, bool enumerate_dictionaries) - : has_additional_keys(has_additional_keys) - , need_global_dictionary(need_global_dictionary) + : has_additional_keys(has_additional_keys_) + , need_global_dictionary(need_global_dictionary_) , need_update_dictionary(enumerate_dictionaries) { if (typeid_cast(&column)) @@ -182,7 +182,7 @@ struct SerializeStateLowCardinality : public IDataType::SerializeBinaryBulkState KeysSerializationVersion key_version; MutableColumnUniquePtr shared_dictionary; - explicit SerializeStateLowCardinality(UInt64 key_version) : key_version(key_version) {} + explicit SerializeStateLowCardinality(UInt64 key_version_) : key_version(key_version_) {} }; struct DeserializeStateLowCardinality : public IDataType::DeserializeBinaryBulkState @@ -201,7 +201,7 @@ struct DeserializeStateLowCardinality : public IDataType::DeserializeBinaryBulkS /// in case of long block of empty arrays we may not need read dictionary at first reading. 
bool need_update_dictionary = false; - explicit DeserializeStateLowCardinality(UInt64 key_version) : key_version(key_version) {} + explicit DeserializeStateLowCardinality(UInt64 key_version_) : key_version(key_version_) {} }; static SerializeStateLowCardinality * checkAndGetLowCardinalitySerializeState( @@ -791,8 +791,8 @@ namespace const IDataType & keys_type; const Creator & creator; - CreateColumnVector(MutableColumnUniquePtr & column, const IDataType & keys_type, const Creator & creator) - : column(column), keys_type(keys_type), creator(creator) + CreateColumnVector(MutableColumnUniquePtr & column_, const IDataType & keys_type_, const Creator & creator_) + : column(column_), keys_type(keys_type_), creator(creator_) { } diff --git a/dbms/src/DataTypes/IDataType.h b/dbms/src/DataTypes/IDataType.h index e5020fe19de..2812b48e8f0 100644 --- a/dbms/src/DataTypes/IDataType.h +++ b/dbms/src/DataTypes/IDataType.h @@ -98,7 +98,7 @@ public: /// Index of tuple element, starting at 1. String tuple_element_name; - Substream(Type type) : type(type) {} + Substream(Type type_) : type(type_) {} }; using SubstreamPath = std::vector; diff --git a/dbms/src/Dictionaries/CacheDictionary.cpp b/dbms/src/Dictionaries/CacheDictionary.cpp index 4d8cd03b3eb..53fc746e565 100644 --- a/dbms/src/Dictionaries/CacheDictionary.cpp +++ b/dbms/src/Dictionaries/CacheDictionary.cpp @@ -61,16 +61,16 @@ inline size_t CacheDictionary::getCellIdx(const Key id) const CacheDictionary::CacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , size{roundUpToPowerOfTwoOrZero(std::max(size, size_t(max_collision_length)))} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime 
dict_lifetime_, + const size_t size_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))} , size_overlap_mask{this->size - 1} , cells{this->size} , rnd_engine(randomSeed()) @@ -207,7 +207,7 @@ void CacheDictionary::isInConstantVector(const Key child_id, const PaddedPODArra void CacheDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto null_value = StringRef{std::get(attribute.null_values)}; @@ -218,7 +218,7 @@ void CacheDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, ids, out, [&](const size_t row) { return def->getDataAt(row); }); } @@ -227,7 +227,7 @@ void CacheDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const String & def, ColumnString * const out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, ids, out, [&](const size_t) { return StringRef{def}; }); } @@ -354,7 +354,7 @@ void CacheDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if 
(hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -367,7 +367,7 @@ CacheDictionary::Attribute CacheDictionary::createAttributeWithType(const Attrib switch (type) { #define DISPATCH(TYPE) \ - case AttributeUnderlyingType::TYPE: \ + case AttributeUnderlyingType::ut##TYPE: \ attr.null_values = TYPE(null_value.get>()); \ attr.arrays = std::make_unique>(size); \ bytes_allocated += size * sizeof(TYPE); \ @@ -387,7 +387,7 @@ CacheDictionary::Attribute CacheDictionary::createAttributeWithType(const Attrib DISPATCH(Float32) DISPATCH(Float64) #undef DISPATCH - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: attr.null_values = null_value.get(); attr.arrays = std::make_unique>(size); bytes_allocated += size * sizeof(StringRef); @@ -403,51 +403,51 @@ void CacheDictionary::setDefaultAttributeValue(Attribute & attribute, const Key { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = 
std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & null_value_ref = std::get(attribute.null_values); auto & string_ref = std::get>(attribute.arrays)[idx]; @@ -469,51 +469,51 @@ void CacheDictionary::setAttributeValue(Attribute & attribute, const Key idx, co { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: 
std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = std::get>(attribute.arrays)[idx]; diff --git a/dbms/src/Dictionaries/CacheDictionary.h b/dbms/src/Dictionaries/CacheDictionary.h index cc613d0d96b..7e1cec6ffe9 100644 --- a/dbms/src/Dictionaries/CacheDictionary.h +++ b/dbms/src/Dictionaries/CacheDictionary.h @@ -24,11 +24,11 @@ 
class CacheDictionary final : public IDictionary { public: CacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in index 6a545403aed..b870a0ed69b 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate1.cpp.in @@ -12,7 +12,7 @@ using TYPE = @NAME@; void CacheDictionary::get@NAME@(const std::string & attribute_name, const PaddedPODArray & ids, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); const auto null_value = std::get(attribute.null_values); diff --git a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in index 787a0f267af..367e150c2cb 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate2.cpp.in @@ -15,7 +15,7 @@ void CacheDictionary::get@NAME@(const std::string & attribute_name, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, ids, out, [&](const size_t row) { return def[row]; }); } diff --git 
a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in b/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in index 7b1d08920f9..8e2c26302e8 100644 --- a/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in +++ b/dbms/src/Dictionaries/CacheDictionary_generate3.cpp.in @@ -12,7 +12,7 @@ using TYPE = @NAME@; void CacheDictionary::get@NAME@(const std::string & attribute_name, const PaddedPODArray & ids, const TYPE def, ResultArrayType & out) const { auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, ids, out, [&](const size_t) { return def; }); } diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp b/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp index 67124ff880c..a3b4e8c5cfb 100644 --- a/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/dbms/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -51,7 +51,7 @@ ClickHouseDictionarySource::ClickHouseDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block, + const Block & sample_block_, Context & context_) : update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} @@ -66,7 +66,7 @@ ClickHouseDictionarySource::ClickHouseDictionarySource( , update_field{config.getString(config_prefix + ".update_field", "")} , invalidate_query{config.getString(config_prefix + ".invalidate_query", "")} , query_builder{dict_struct, db, table, where, IdentifierQuotingStyle::Backticks} - , sample_block{sample_block} + , sample_block{sample_block_} , context(context_) , is_local{isLocalAddress({host, port}, context.getTCPPort())} , pool{is_local ? 
nullptr : createPool(host, port, secure, db, user, password)} diff --git a/dbms/src/Dictionaries/ClickHouseDictionarySource.h b/dbms/src/Dictionaries/ClickHouseDictionarySource.h index 991782b1549..3df962708bd 100644 --- a/dbms/src/Dictionaries/ClickHouseDictionarySource.h +++ b/dbms/src/Dictionaries/ClickHouseDictionarySource.h @@ -22,7 +22,7 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block, + const Block & sample_block_, Context & context); /// copy-constructor is provided in order to support cloneability diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp index b9172746120..3478e631076 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.cpp @@ -51,16 +51,16 @@ inline UInt64 ComplexKeyCacheDictionary::getCellIdx(const StringRef key) const ComplexKeyCacheDictionary::ComplexKeyCacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , size{roundUpToPowerOfTwoOrZero(std::max(size, size_t(max_collision_length)))} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , size{roundUpToPowerOfTwoOrZero(std::max(size_, size_t(max_collision_length)))} , size_overlap_mask{this->size - 1} , rnd_engine(randomSeed()) { @@ -77,7 +77,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = 
getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto null_value = StringRef{std::get(attribute.null_values)}; @@ -94,7 +94,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, key_columns, out, [&](const size_t row) { return def->getDataAt(row); }); } @@ -109,7 +109,7 @@ void ComplexKeyCacheDictionary::getString( dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsString(attribute, key_columns, out, [&](const size_t) { return StringRef{def}; }); } @@ -290,7 +290,7 @@ StringRef ComplexKeyCacheDictionary::placeKeysInPool( { keys[j] = key_columns[j]->getDataAt(row); sum_keys_size += keys[j].size; - if (key_attributes[j].underlying_type == AttributeUnderlyingType::String) + if (key_attributes[j].underlying_type == AttributeUnderlyingType::utString) sum_keys_size += sizeof(size_t) + 1; } @@ -299,7 +299,7 @@ StringRef ComplexKeyCacheDictionary::placeKeysInPool( auto key_start = place; for (size_t j = 0; j < keys_size; ++j) { - if (key_attributes[j].underlying_type == AttributeUnderlyingType::String) + if (key_attributes[j].underlying_type == AttributeUnderlyingType::utString) { auto start = key_start; auto key_size = keys[j].size + 1; diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h index 
ffac807c04c..7c2ba75ba17 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary.h @@ -42,11 +42,11 @@ class ComplexKeyCacheDictionary final : public IDictionaryBase { public: ComplexKeyCacheDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - const size_t size); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + const size_t size_); std::string getKeyDescription() const { return key_description; } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp index aeb85881f86..e15a6fb3014 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_createAttributeWithType.cpp @@ -10,7 +10,7 @@ ComplexKeyCacheDictionary::createAttributeWithType(const AttributeUnderlyingType switch (type) { #define DISPATCH(TYPE) \ - case AttributeUnderlyingType::TYPE: \ + case AttributeUnderlyingType::ut##TYPE: \ attr.null_values = TYPE(null_value.get>()); \ attr.arrays = std::make_unique>(size); \ bytes_allocated += size * sizeof(TYPE); \ @@ -30,7 +30,7 @@ ComplexKeyCacheDictionary::createAttributeWithType(const AttributeUnderlyingType DISPATCH(Float32) DISPATCH(Float64) #undef DISPATCH - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: attr.null_values = null_value.get(); attr.arrays = std::make_unique>(size); bytes_allocated += size * sizeof(StringRef); diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in index fd6863e4669..5c0ed408a55 100644 --- 
a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate1.cpp.in @@ -13,7 +13,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, co dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); const auto null_value = std::get(attribute.null_values); diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in index 6b94bee5700..b3233cd05e1 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate2.cpp.in @@ -18,7 +18,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, key_columns, out, [&](const size_t row) { return def[row]; }); } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in index 20e05efe399..02e77c01a4a 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_generate3.cpp.in @@ -18,7 +18,7 @@ void ComplexKeyCacheDictionary::get@NAME@(const std::string & attribute_name, dict_struct.validateKeyTypes(key_types); auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::@NAME@); + checkAttributeType(name, 
attribute_name, attribute.type, AttributeUnderlyingType::ut@NAME@); getItemsNumberImpl(attribute, key_columns, out, [&](const size_t) { return def; }); } diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp index 7b3a44214c5..cf2eef82347 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setAttributeValue.cpp @@ -6,51 +6,51 @@ void ComplexKeyCacheDictionary::setAttributeValue(Attribute & attribute, const s { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = value.get(); break; - case 
AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); auto & string_ref = std::get>(attribute.arrays)[idx]; diff --git a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp index 89cf1506f90..aa03cc88038 100644 --- a/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp +++ b/dbms/src/Dictionaries/ComplexKeyCacheDictionary_setDefaultAttributeValue.cpp @@ -6,51 +6,51 @@ void ComplexKeyCacheDictionary::setDefaultAttributeValue(Attribute & attribute, { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: std::get>(attribute.arrays)[idx] = 
std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: std::get>(attribute.arrays)[idx] = std::get(attribute.null_values); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & null_value_ref = std::get(attribute.null_values); auto & string_ref = std::get>(attribute.arrays)[idx]; diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp index 39ef9124061..586fc5e89f9 100644 --- a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp +++ b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.cpp @@ -15,18 +15,18 @@ namespace ErrorCodes } 
ComplexKeyHashedDictionary::ComplexKeyHashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) - , saved_block{std::move(saved_block)} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) + , saved_block{std::move(saved_block_)} { createAttributes(); loadData(); @@ -40,7 +40,7 @@ ComplexKeyHashedDictionary::ComplexKeyHashedDictionary( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -72,7 +72,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get(attribute.null_values)}; @@ -94,7 +94,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + 
checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, \ @@ -128,7 +128,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -148,7 +148,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, key_columns, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -179,7 +179,7 @@ void ComplexKeyHashedDictionary::getString( dict_struct.validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -196,50 +196,50 @@ void ComplexKeyHashedDictionary::has(const Columns & key_columns, const DataType switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, key_columns, out); break; - case 
AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, key_columns, out); break; } @@ -416,51 +416,51 @@ void ComplexKeyHashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case 
AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -487,51 +487,51 @@ ComplexKeyHashedDictionary::createAttributeWithType(const AttributeUnderlyingTyp switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + 
case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps.emplace>(); @@ -583,37 +583,37 @@ bool ComplexKeyHashedDictionary::setAttributeValue(Attribute & attribute, const { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt64: + case 
AttributeUnderlyingType::utUInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = std::get>(attribute.maps); const auto & string = value.get(); @@ -687,36 +687,36 @@ std::vector ComplexKeyHashedDictionary::getKeys() const switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return getKeys(attribute); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return getKeys(attribute); 
- case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return getKeys(attribute); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return getKeys(attribute); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return getKeys(attribute); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return getKeys(attribute); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return getKeys(attribute); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return getKeys(attribute); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return getKeys(attribute); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return getKeys(attribute); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return getKeys(attribute); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return getKeys(attribute); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return getKeys(attribute); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return getKeys(attribute); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return getKeys(attribute); } return {}; diff --git a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h index 54ee8627f9b..68b8d9d0d36 100644 --- a/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h +++ b/dbms/src/Dictionaries/ComplexKeyHashedDictionary.h @@ -23,12 +23,12 @@ class ComplexKeyHashedDictionary final : public IDictionaryBase { public: ComplexKeyHashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool 
require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getKeyDescription() const { return key_description; } diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStream.h b/dbms/src/Dictionaries/DictionaryBlockInputStream.h index b5af6e3b912..09b9ec8d4af 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStream.h +++ b/dbms/src/Dictionaries/DictionaryBlockInputStream.h @@ -202,11 +202,11 @@ private: template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, UInt64 max_block_size, PaddedPODArray && ids, const Names & column_names) - : DictionaryBlockInputStreamBase(ids.size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) - , ids(std::move(ids)) + std::shared_ptr dictionary_, UInt64 max_block_size_, PaddedPODArray && ids_, const Names & column_names_) + : DictionaryBlockInputStreamBase(ids_.size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , column_names(column_names_) + , ids(std::move(ids_)) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function( &DictionaryBlockInputStream::fillBlock) @@ -216,13 +216,13 @@ DictionaryBlockInputStream::DictionaryBlockInputStream( template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, - UInt64 max_block_size, + std::shared_ptr dictionary_, + UInt64 max_block_size_, const std::vector & keys, - const Names & column_names) - : DictionaryBlockInputStreamBase(keys.size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) + const Names & column_names_) + : DictionaryBlockInputStreamBase(keys.size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , 
column_names(column_names_) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function(&DictionaryBlockInputStream::fillBlock) , key_type(DictionaryKeyType::ComplexKey) @@ -233,20 +233,20 @@ DictionaryBlockInputStream::DictionaryBlockInputStream( template DictionaryBlockInputStream::DictionaryBlockInputStream( - std::shared_ptr dictionary, - UInt64 max_block_size, - const Columns & data_columns, - const Names & column_names, - GetColumnsFunction && get_key_columns_function, - GetColumnsFunction && get_view_columns_function) - : DictionaryBlockInputStreamBase(data_columns.front()->size(), max_block_size) - , dictionary(std::static_pointer_cast(dictionary)) - , column_names(column_names) + std::shared_ptr dictionary_, + UInt64 max_block_size_, + const Columns & data_columns_, + const Names & column_names_, + GetColumnsFunction && get_key_columns_function_, + GetColumnsFunction && get_view_columns_function_) + : DictionaryBlockInputStreamBase(data_columns_.front()->size(), max_block_size_) + , dictionary(std::static_pointer_cast(dictionary_)) + , column_names(column_names_) , logger(&Poco::Logger::get("DictionaryBlockInputStream")) , fill_block_function(&DictionaryBlockInputStream::fillBlock) - , data_columns(data_columns) - , get_key_columns_function(get_key_columns_function) - , get_view_columns_function(get_view_columns_function) + , data_columns(data_columns_) + , get_key_columns_function(get_key_columns_function_) + , get_view_columns_function(get_view_columns_function_) , key_type(DictionaryKeyType::Callback) { } @@ -422,58 +422,58 @@ Block DictionaryBlockInputStream::fillBlock( column = getColumnFromAttribute>(&DictionaryType::get##TYPE, ids_to_fill, keys, data_types, attribute, *dictionary) switch (attribute.underlying_type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: GET_COLUMN_FORM_ATTRIBUTE(UInt8); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: 
GET_COLUMN_FORM_ATTRIBUTE(UInt16); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: GET_COLUMN_FORM_ATTRIBUTE(UInt32); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: GET_COLUMN_FORM_ATTRIBUTE(UInt64); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: GET_COLUMN_FORM_ATTRIBUTE(UInt128); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: GET_COLUMN_FORM_ATTRIBUTE(Int8); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: GET_COLUMN_FORM_ATTRIBUTE(Int16); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: GET_COLUMN_FORM_ATTRIBUTE(Int32); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: GET_COLUMN_FORM_ATTRIBUTE(Int64); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: GET_COLUMN_FORM_ATTRIBUTE(Float32); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: GET_COLUMN_FORM_ATTRIBUTE(Float64); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: { column = getColumnFromAttribute>( &DictionaryType::getDecimal32, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: { column = getColumnFromAttribute>( &DictionaryType::getDecimal64, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: { column = getColumnFromAttribute>( &DictionaryType::getDecimal128, ids_to_fill, keys, data_types, attribute, *dictionary); break; } - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { column = getColumnFromStringAttribute( &DictionaryType::getString, ids_to_fill, keys, data_types, 
attribute, *dictionary); diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp index 1a793d4705f..3a3fd09220f 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp +++ b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.cpp @@ -2,8 +2,8 @@ namespace DB { -DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size) - : rows_count(rows_count), max_block_size(max_block_size) +DictionaryBlockInputStreamBase::DictionaryBlockInputStreamBase(size_t rows_count_, size_t max_block_size_) + : rows_count(rows_count_), max_block_size(max_block_size_) { } diff --git a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h index 571a0da3b50..fb99918aed8 100644 --- a/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h +++ b/dbms/src/Dictionaries/DictionaryBlockInputStreamBase.h @@ -7,7 +7,7 @@ namespace DB class DictionaryBlockInputStreamBase : public IBlockInputStream { protected: - DictionaryBlockInputStreamBase(size_t rows_count, size_t max_block_size); + DictionaryBlockInputStreamBase(size_t rows_count_, size_t max_block_size_); virtual Block getBlock(size_t start, size_t length) const = 0; diff --git a/dbms/src/Dictionaries/DictionaryStructure.cpp b/dbms/src/Dictionaries/DictionaryStructure.cpp index d43b749935f..925e9e01a82 100644 --- a/dbms/src/Dictionaries/DictionaryStructure.cpp +++ b/dbms/src/Dictionaries/DictionaryStructure.cpp @@ -43,20 +43,20 @@ namespace AttributeUnderlyingType getAttributeUnderlyingType(const std::string & type) { static const std::unordered_map dictionary{ - {"UInt8", AttributeUnderlyingType::UInt8}, - {"UInt16", AttributeUnderlyingType::UInt16}, - {"UInt32", AttributeUnderlyingType::UInt32}, - {"UInt64", AttributeUnderlyingType::UInt64}, - {"UUID", AttributeUnderlyingType::UInt128}, - {"Int8", AttributeUnderlyingType::Int8}, - {"Int16", 
AttributeUnderlyingType::Int16}, - {"Int32", AttributeUnderlyingType::Int32}, - {"Int64", AttributeUnderlyingType::Int64}, - {"Float32", AttributeUnderlyingType::Float32}, - {"Float64", AttributeUnderlyingType::Float64}, - {"String", AttributeUnderlyingType::String}, - {"Date", AttributeUnderlyingType::UInt16}, - {"DateTime", AttributeUnderlyingType::UInt32}, + {"UInt8", AttributeUnderlyingType::utUInt8}, + {"UInt16", AttributeUnderlyingType::utUInt16}, + {"UInt32", AttributeUnderlyingType::utUInt32}, + {"UInt64", AttributeUnderlyingType::utUInt64}, + {"UUID", AttributeUnderlyingType::utUInt128}, + {"Int8", AttributeUnderlyingType::utInt8}, + {"Int16", AttributeUnderlyingType::utInt16}, + {"Int32", AttributeUnderlyingType::utInt32}, + {"Int64", AttributeUnderlyingType::utInt64}, + {"Float32", AttributeUnderlyingType::utFloat32}, + {"Float64", AttributeUnderlyingType::utFloat64}, + {"String", AttributeUnderlyingType::utString}, + {"Date", AttributeUnderlyingType::utUInt16}, + {"DateTime", AttributeUnderlyingType::utUInt32}, }; const auto it = dictionary.find(type); @@ -67,11 +67,11 @@ AttributeUnderlyingType getAttributeUnderlyingType(const std::string & type) { size_t start = strlen("Decimal"); if (type.find("32", start) == start) - return AttributeUnderlyingType::Decimal32; + return AttributeUnderlyingType::utDecimal32; if (type.find("64", start) == start) - return AttributeUnderlyingType::Decimal64; + return AttributeUnderlyingType::utDecimal64; if (type.find("128", start) == start) - return AttributeUnderlyingType::Decimal128; + return AttributeUnderlyingType::utDecimal128; } throw Exception{"Unknown type " + type, ErrorCodes::UNKNOWN_TYPE}; @@ -82,35 +82,35 @@ std::string toString(const AttributeUnderlyingType type) { switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return "UInt8"; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return "UInt16"; - case 
AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return "UInt32"; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return "UInt64"; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return "UUID"; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return "Int8"; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return "Int16"; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return "Int32"; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return "Int64"; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return "Float32"; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return "Float64"; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return "Decimal32"; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return "Decimal64"; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return "Decimal128"; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return "String"; } @@ -243,7 +243,7 @@ bool DictionaryStructure::isKeySizeFixed() const return true; for (const auto & key_i : *key) - if (key_i.underlying_type == AttributeUnderlyingType::String) + if (key_i.underlying_type == AttributeUnderlyingType::utString) return false; return true; diff --git a/dbms/src/Dictionaries/DictionaryStructure.h b/dbms/src/Dictionaries/DictionaryStructure.h index 4de712a012a..f39f59d90d2 100644 --- a/dbms/src/Dictionaries/DictionaryStructure.h +++ b/dbms/src/Dictionaries/DictionaryStructure.h @@ -21,21 +21,21 @@ namespace ErrorCodes enum class AttributeUnderlyingType { - UInt8, - UInt16, - UInt32, - UInt64, - UInt128, - Int8, - Int16, - Int32, - Int64, - Float32, - Float64, 
- Decimal32, - Decimal64, - Decimal128, - String + utUInt8, + utUInt16, + utUInt32, + utUInt64, + utUInt128, + utInt8, + utInt16, + utInt32, + utInt64, + utFloat32, + utFloat64, + utDecimal32, + utDecimal64, + utDecimal128, + utString }; diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp index 67180921a93..94ee6d8b2f8 100644 --- a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp +++ b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.cpp @@ -20,7 +20,7 @@ IRegionsHierarchyReaderPtr RegionsHierarchyDataSource::createReader() } -RegionsHierarchiesDataProvider::RegionsHierarchiesDataProvider(const std::string & path) : path(path) +RegionsHierarchiesDataProvider::RegionsHierarchiesDataProvider(const std::string & path_) : path(path_) { discoverFilesWithCustomHierarchies(); } diff --git a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h index 1c7392cb98a..198f13e0f32 100644 --- a/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h +++ b/dbms/src/Dictionaries/Embedded/GeodataProviders/HierarchiesProvider.h @@ -40,7 +40,7 @@ public: * For example, if /opt/geo/regions_hierarchy.txt is specified, * then the /opt/geo/regions_hierarchy_ua.txt file will also be loaded, if any, it will be accessible by the `ua` key. 
*/ - RegionsHierarchiesDataProvider(const std::string & path); + RegionsHierarchiesDataProvider(const std::string & path_); std::vector listCustomHierarchies() const override; diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp b/dbms/src/Dictionaries/ExecutableDictionarySource.cpp index b54894e043c..d76de3abe0e 100644 --- a/dbms/src/Dictionaries/ExecutableDictionarySource.cpp +++ b/dbms/src/Dictionaries/ExecutableDictionarySource.cpp @@ -46,15 +46,15 @@ ExecutableDictionarySource::ExecutableDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context) + Block & sample_block_, + const Context & context_) : log(&Logger::get("ExecutableDictionarySource")) , dict_struct{dict_struct_} , command{config.getString(config_prefix + ".command")} , update_field{config.getString(config_prefix + ".update_field", "")} , format{config.getString(config_prefix + ".format")} - , sample_block{sample_block} - , context(context) + , sample_block{sample_block_} + , context(context_) { } diff --git a/dbms/src/Dictionaries/ExecutableDictionarySource.h b/dbms/src/Dictionaries/ExecutableDictionarySource.h index 9816161a70e..879248663dc 100644 --- a/dbms/src/Dictionaries/ExecutableDictionarySource.h +++ b/dbms/src/Dictionaries/ExecutableDictionarySource.h @@ -19,8 +19,8 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context); + Block & sample_block_, + const Context & context_); ExecutableDictionarySource(const ExecutableDictionarySource & other); ExecutableDictionarySource & operator=(const ExecutableDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/FileDictionarySource.cpp b/dbms/src/Dictionaries/FileDictionarySource.cpp index 2ac580d7bb3..1505c2629f6 100644 --- 
a/dbms/src/Dictionaries/FileDictionarySource.cpp +++ b/dbms/src/Dictionaries/FileDictionarySource.cpp @@ -13,8 +13,8 @@ static const UInt64 max_block_size = 8192; FileDictionarySource::FileDictionarySource( - const std::string & filename, const std::string & format, Block & sample_block, const Context & context) - : filename{filename}, format{format}, sample_block{sample_block}, context(context) + const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_) + : filename{filename_}, format{format_}, sample_block{sample_block_}, context(context_) { } diff --git a/dbms/src/Dictionaries/FileDictionarySource.h b/dbms/src/Dictionaries/FileDictionarySource.h index 083c3c6a3a7..b7ed46a99e2 100644 --- a/dbms/src/Dictionaries/FileDictionarySource.h +++ b/dbms/src/Dictionaries/FileDictionarySource.h @@ -13,7 +13,7 @@ class Context; class FileDictionarySource final : public IDictionarySource { public: - FileDictionarySource(const std::string & filename, const std::string & format, Block & sample_block, const Context & context); + FileDictionarySource(const std::string & filename_, const std::string & format_, Block & sample_block_, const Context & context_); FileDictionarySource(const FileDictionarySource & other); diff --git a/dbms/src/Dictionaries/FlatDictionary.cpp b/dbms/src/Dictionaries/FlatDictionary.cpp index b7b70748c01..d1c6a138c89 100644 --- a/dbms/src/Dictionaries/FlatDictionary.cpp +++ b/dbms/src/Dictionaries/FlatDictionary.cpp @@ -21,19 +21,19 @@ static const auto max_array_size = 500000; FlatDictionary::FlatDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & name_, + const
DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) , loaded_ids(initial_array_size, false) - , saved_block{std::move(saved_block)} + , saved_block{std::move(saved_block_)} { createAttributes(); loadData(); @@ -107,7 +107,7 @@ void FlatDictionary::isInConstantVector(const Key child_id, const PaddedPODArray void FlatDictionary::get##TYPE(const std::string & attribute_name, const PaddedPODArray & ids, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -133,7 +133,7 @@ DECLARE(Decimal128) void FlatDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = std::get(attribute.null_values); @@ -152,7 +152,7 @@ void FlatDictionary::getString(const std::string & attribute_name, const PaddedP ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t row) { return 
def[row]; }); \ @@ -177,7 +177,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -191,7 +191,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const TYPE def, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -216,7 +216,7 @@ void FlatDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const String & def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); FlatDictionary::getItemsImpl( attribute, @@ -232,50 +232,50 @@ void FlatDictionary::has(const PaddedPODArray & ids, PaddedPODArray switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt64: + case 
AttributeUnderlyingType::utUInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, ids, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, ids, out); break; } @@ -296,7 +296,7 @@ void FlatDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -425,51 +425,51 @@ void FlatDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: 
addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -506,50 +506,50 @@ FlatDictionary::Attribute FlatDictionary::createAttributeWithType(const Attribut switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: 
createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; } @@ -612,50 +612,50 @@ void FlatDictionary::setAttributeValue(Attribute & attribute, const Key id, cons { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: setAttributeValueImpl(attribute, id, value.get()); break; - 
case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: setAttributeValueImpl(attribute, id, value.get()); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: setAttributeValueImpl(attribute, id, value.get()); break; } diff --git 
a/dbms/src/Dictionaries/FlatDictionary.h b/dbms/src/Dictionaries/FlatDictionary.h index de14cc3dc1a..d9ea141de2e 100644 --- a/dbms/src/Dictionaries/FlatDictionary.h +++ b/dbms/src/Dictionaries/FlatDictionary.h @@ -22,12 +22,12 @@ class FlatDictionary final : public IDictionary { public: FlatDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.cpp b/dbms/src/Dictionaries/HTTPDictionarySource.cpp index bb64b13c103..fffbcc402b0 100644 --- a/dbms/src/Dictionaries/HTTPDictionarySource.cpp +++ b/dbms/src/Dictionaries/HTTPDictionarySource.cpp @@ -22,16 +22,16 @@ HTTPDictionarySource::HTTPDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context) + Block & sample_block_, + const Context & context_) : log(&Logger::get("HTTPDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} , url{config.getString(config_prefix + ".url", "")} , update_field{config.getString(config_prefix + ".update_field", "")} , format{config.getString(config_prefix + ".format")} - , sample_block{sample_block} - , context(context) + , sample_block{sample_block_} + , context(context_) , timeouts(ConnectionTimeouts::getHTTPTimeouts(context)) { } diff --git a/dbms/src/Dictionaries/HTTPDictionarySource.h b/dbms/src/Dictionaries/HTTPDictionarySource.h index 78fe5193533..705095193d1 100644 --- a/dbms/src/Dictionaries/HTTPDictionarySource.h +++ 
b/dbms/src/Dictionaries/HTTPDictionarySource.h @@ -22,8 +22,8 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block, - const Context & context); + Block & sample_block_, + const Context & context_); HTTPDictionarySource(const HTTPDictionarySource & other); HTTPDictionarySource & operator=(const HTTPDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/HashedDictionary.cpp b/dbms/src/Dictionaries/HashedDictionary.cpp index 413cfadec39..9b853ac2df5 100644 --- a/dbms/src/Dictionaries/HashedDictionary.cpp +++ b/dbms/src/Dictionaries/HashedDictionary.cpp @@ -16,18 +16,18 @@ namespace ErrorCodes HashedDictionary::HashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) - , saved_block{std::move(saved_block)} + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) + , saved_block{std::move(saved_block_)} { createAttributes(); loadData(); @@ -106,7 +106,7 @@ void HashedDictionary::isInConstantVector(const Key child_id, const PaddedPODArr const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ 
@@ -132,7 +132,7 @@ DECLARE(Decimal128) void HashedDictionary::getString(const std::string & attribute_name, const PaddedPODArray & ids, ColumnString * out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get(attribute.null_values)}; @@ -151,7 +151,7 @@ void HashedDictionary::getString(const std::string & attribute_name, const Padde ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t row) { return def[row]; }); \ @@ -176,7 +176,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const ColumnString * const def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -190,7 +190,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const TYPE & def, ResultArrayType & out) const \ { \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, ids, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return 
def; }); \ @@ -215,7 +215,7 @@ void HashedDictionary::getString( const std::string & attribute_name, const PaddedPODArray & ids, const String & def, ColumnString * const out) const { const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -230,50 +230,50 @@ void HashedDictionary::has(const PaddedPODArray & ids, PaddedPODArray(attribute, ids, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, ids, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal64: + 
case AttributeUnderlyingType::utDecimal64: has(attribute, ids, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, ids, out); break; } @@ -293,7 +293,7 @@ void HashedDictionary::createAttributes() { hierarchical_attribute = &attributes.back(); - if (hierarchical_attribute->type != AttributeUnderlyingType::UInt64) + if (hierarchical_attribute->type != AttributeUnderlyingType::utUInt64) throw Exception{name + ": hierarchical attribute must be UInt64.", ErrorCodes::TYPE_MISMATCH}; } } @@ -420,51 +420,51 @@ void HashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case 
AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -488,51 +488,51 @@ HashedDictionary::Attribute HashedDictionary::createAttributeWithType(const Attr switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: 
createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps = std::make_unique>(); @@ -573,37 +573,37 @@ bool HashedDictionary::setAttributeValue(Attribute & attribute, const Key id, co { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl(attribute, id, value.get()); - case 
AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl(attribute, id, value.get()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = *std::get>(attribute.maps); const auto & string = value.get(); @@ -655,36 +655,36 @@ PaddedPODArray HashedDictionary::getIds() const switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return getIds(attribute); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return getIds(attribute); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return getIds(attribute); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return getIds(attribute); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return getIds(attribute); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return getIds(attribute); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return getIds(attribute); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return getIds(attribute); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return getIds(attribute); - case AttributeUnderlyingType::Float32: + case 
AttributeUnderlyingType::utFloat32: return getIds(attribute); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return getIds(attribute); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: return getIds(attribute); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return getIds(attribute); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return getIds(attribute); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return getIds(attribute); } return PaddedPODArray(); diff --git a/dbms/src/Dictionaries/HashedDictionary.h b/dbms/src/Dictionaries/HashedDictionary.h index 92875f27cf3..d1aa5a38d97 100644 --- a/dbms/src/Dictionaries/HashedDictionary.h +++ b/dbms/src/Dictionaries/HashedDictionary.h @@ -21,12 +21,12 @@ class HashedDictionary final : public IDictionary { public: HashedDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty, - BlockPtr saved_block = nullptr); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_, + BlockPtr saved_block_ = nullptr); std::string getName() const override { return name; } diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.cpp b/dbms/src/Dictionaries/LibraryDictionarySource.cpp index 1e11a2ed011..b4de6506db1 100644 --- a/dbms/src/Dictionaries/LibraryDictionarySource.cpp +++ b/dbms/src/Dictionaries/LibraryDictionarySource.cpp @@ -120,13 +120,13 @@ namespace LibraryDictionarySource::LibraryDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, - const std::string & config_prefix, - Block & sample_block) + const std::string & config_prefix_, + Block & 
sample_block_) : log(&Logger::get("LibraryDictionarySource")) , dict_struct{dict_struct_} - , config_prefix{config_prefix} + , config_prefix{config_prefix_} , path{config.getString(config_prefix + ".path", "")} - , sample_block{sample_block} + , sample_block{sample_block_} { if (!Poco::File(path).exists()) throw Exception( diff --git a/dbms/src/Dictionaries/LibraryDictionarySource.h b/dbms/src/Dictionaries/LibraryDictionarySource.h index d09e5eee691..5d18f114177 100644 --- a/dbms/src/Dictionaries/LibraryDictionarySource.h +++ b/dbms/src/Dictionaries/LibraryDictionarySource.h @@ -31,8 +31,8 @@ public: LibraryDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, - const std::string & config_prefix, - Block & sample_block); + const std::string & config_prefix_, + Block & sample_block_); LibraryDictionarySource(const LibraryDictionarySource & other); LibraryDictionarySource & operator=(const LibraryDictionarySource &) = delete; diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp index f9199fa90bb..363147c484e 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.cpp @@ -33,8 +33,8 @@ MongoDBBlockInputStream::MongoDBBlockInputStream( std::shared_ptr & connection_, std::unique_ptr cursor_, const Block & sample_block, - const UInt64 max_block_size) - : connection(connection_), cursor{std::move(cursor_)}, max_block_size{max_block_size} + const UInt64 max_block_size_) + : connection(connection_), cursor{std::move(cursor_)}, max_block_size{max_block_size_} { description.init(sample_block); } @@ -87,38 +87,38 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: insertNumber(column, value, name); break; - case ValueType::UInt16: + case ValueType::vtUInt16: insertNumber(column, value, name); break; - case ValueType::UInt32: + case ValueType::vtUInt32: insertNumber(column, 
value, name); break; - case ValueType::UInt64: + case ValueType::vtUInt64: insertNumber(column, value, name); break; - case ValueType::Int8: + case ValueType::vtInt8: insertNumber(column, value, name); break; - case ValueType::Int16: + case ValueType::vtInt16: insertNumber(column, value, name); break; - case ValueType::Int32: + case ValueType::vtInt32: insertNumber(column, value, name); break; - case ValueType::Int64: + case ValueType::vtInt64: insertNumber(column, value, name); break; - case ValueType::Float32: + case ValueType::vtFloat32: insertNumber(column, value, name); break; - case ValueType::Float64: + case ValueType::vtFloat64: insertNumber(column, value, name); break; - case ValueType::String: + case ValueType::vtString: { if (value.type() == Poco::MongoDB::ElementTraits::TypeId) { @@ -137,7 +137,7 @@ namespace ErrorCodes::TYPE_MISMATCH}; } - case ValueType::Date: + case ValueType::vtDate: { if (value.type() != Poco::MongoDB::ElementTraits::TypeId) throw Exception{"Type mismatch, expected Timestamp, got type id = " + toString(value.type()) + " for column " + name, @@ -148,7 +148,7 @@ namespace break; } - case ValueType::DateTime: + case ValueType::vtDateTime: { if (value.type() != Poco::MongoDB::ElementTraits::TypeId) throw Exception{"Type mismatch, expected Timestamp, got type id = " + toString(value.type()) + " for column " + name, @@ -158,7 +158,7 @@ namespace static_cast &>(value).value().epochTime()); break; } - case ValueType::UUID: + case ValueType::vtUUID: { if (value.type() == Poco::MongoDB::ElementTraits::TypeId) { diff --git a/dbms/src/Dictionaries/MongoDBBlockInputStream.h b/dbms/src/Dictionaries/MongoDBBlockInputStream.h index cf759692e9f..d5d692c827c 100644 --- a/dbms/src/Dictionaries/MongoDBBlockInputStream.h +++ b/dbms/src/Dictionaries/MongoDBBlockInputStream.h @@ -25,7 +25,7 @@ public: std::shared_ptr & connection_, std::unique_ptr cursor_, const Block & sample_block, - const UInt64 max_block_size); + const UInt64 max_block_size_); 
~MongoDBBlockInputStream() override; diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp index 73ffd4727fa..18d9f840426 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/dbms/src/Dictionaries/MongoDBDictionarySource.cpp @@ -168,24 +168,24 @@ authenticate(Poco::MongoDB::Connection & connection, const std::string & databas MongoDBDictionarySource::MongoDBDictionarySource( - const DictionaryStructure & dict_struct, - const std::string & host, - UInt16 port, - const std::string & user, - const std::string & password, - const std::string & method, - const std::string & db, - const std::string & collection, - const Block & sample_block) - : dict_struct{dict_struct} - , host{host} - , port{port} - , user{user} - , password{password} - , method{method} - , db{db} - , collection{collection} - , sample_block{sample_block} + const DictionaryStructure & dict_struct_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + const Block & sample_block_) + : dict_struct{dict_struct_} + , host{host_} + , port{port_} + , user{user_} + , password{password_} + , method{method_} + , db{db_} + , collection{collection_} + , sample_block{sample_block_} , connection{std::make_shared(host, port)} { if (!user.empty()) @@ -202,12 +202,12 @@ MongoDBDictionarySource::MongoDBDictionarySource( MongoDBDictionarySource::MongoDBDictionarySource( - const DictionaryStructure & dict_struct, + const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - Block & sample_block) + Block & sample_block_) : MongoDBDictionarySource( - dict_struct, + dict_struct_, config.getString(config_prefix + ".host"), config.getUInt(config_prefix + ".port"), config.getString(config_prefix + ".user", ""), @@ -215,7 +215,7 @@ 
MongoDBDictionarySource::MongoDBDictionarySource( config.getString(config_prefix + ".method", ""), config.getString(config_prefix + ".db", ""), config.getString(config_prefix + ".collection"), - sample_block) + sample_block_) { } @@ -297,27 +297,27 @@ BlockInputStreamPtr MongoDBDictionarySource::loadKeys(const Columns & key_column { switch (attr.second.underlying_type) { - case AttributeUnderlyingType::UInt8: - case AttributeUnderlyingType::UInt16: - case AttributeUnderlyingType::UInt32: - case AttributeUnderlyingType::UInt64: - case AttributeUnderlyingType::UInt128: - case AttributeUnderlyingType::Int8: - case AttributeUnderlyingType::Int16: - case AttributeUnderlyingType::Int32: - case AttributeUnderlyingType::Int64: - case AttributeUnderlyingType::Decimal32: - case AttributeUnderlyingType::Decimal64: - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utUInt8: + case AttributeUnderlyingType::utUInt16: + case AttributeUnderlyingType::utUInt32: + case AttributeUnderlyingType::utUInt64: + case AttributeUnderlyingType::utUInt128: + case AttributeUnderlyingType::utInt8: + case AttributeUnderlyingType::utInt16: + case AttributeUnderlyingType::utInt32: + case AttributeUnderlyingType::utInt64: + case AttributeUnderlyingType::utDecimal32: + case AttributeUnderlyingType::utDecimal64: + case AttributeUnderlyingType::utDecimal128: key.add(attr.second.name, Int32(key_columns[attr.first]->get64(row_idx))); break; - case AttributeUnderlyingType::Float32: - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat32: + case AttributeUnderlyingType::utFloat64: key.add(attr.second.name, applyVisitor(FieldVisitorConvertToNumber(), (*key_columns[attr.first])[row_idx])); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: String _str(get((*key_columns[attr.first])[row_idx])); /// Convert string to ObjectID if (attr.second.is_object_id) diff --git a/dbms/src/Dictionaries/MongoDBDictionarySource.h 
b/dbms/src/Dictionaries/MongoDBDictionarySource.h index 6cb627ec7ed..dd53cca10c0 100644 --- a/dbms/src/Dictionaries/MongoDBDictionarySource.h +++ b/dbms/src/Dictionaries/MongoDBDictionarySource.h @@ -27,15 +27,15 @@ namespace DB class MongoDBDictionarySource final : public IDictionarySource { MongoDBDictionarySource( - const DictionaryStructure & dict_struct, - const std::string & host, - UInt16 port, - const std::string & user, - const std::string & password, - const std::string & method, - const std::string & db, - const std::string & collection, - const Block & sample_block); + const DictionaryStructure & dict_struct_, + const std::string & host_, + UInt16 port_, + const std::string & user_, + const std::string & password_, + const std::string & method_, + const std::string & db_, + const std::string & collection_, + const Block & sample_block_); public: MongoDBDictionarySource( diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.cpp b/dbms/src/Dictionaries/MySQLDictionarySource.cpp index 73b9b089806..497448bf64c 100644 --- a/dbms/src/Dictionaries/MySQLDictionarySource.cpp +++ b/dbms/src/Dictionaries/MySQLDictionarySource.cpp @@ -57,7 +57,7 @@ MySQLDictionarySource::MySQLDictionarySource( const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block) + const Block & sample_block_) : log(&Logger::get("MySQLDictionarySource")) , update_time{std::chrono::system_clock::from_time_t(0)} , dict_struct{dict_struct_} @@ -66,7 +66,7 @@ MySQLDictionarySource::MySQLDictionarySource( , where{config.getString(config_prefix + ".where", "")} , update_field{config.getString(config_prefix + ".update_field", "")} , dont_check_update_time{config.getBool(config_prefix + ".dont_check_update_time", false)} - , sample_block{sample_block} + , sample_block{sample_block_} , pool{config, config_prefix} , query_builder{dict_struct, db, table, where, IdentifierQuotingStyle::Backticks} , 
load_all_query{query_builder.composeLoadAllQuery()} diff --git a/dbms/src/Dictionaries/MySQLDictionarySource.h b/dbms/src/Dictionaries/MySQLDictionarySource.h index cfc45f42bb3..047bd860ee1 100644 --- a/dbms/src/Dictionaries/MySQLDictionarySource.h +++ b/dbms/src/Dictionaries/MySQLDictionarySource.h @@ -33,7 +33,7 @@ public: const DictionaryStructure & dict_struct_, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, - const Block & sample_block); + const Block & sample_block_); /// copy-constructor is provided in order to support cloneability MySQLDictionarySource(const MySQLDictionarySource & other); diff --git a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h b/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h index 827667a7dc5..a2353051e5d 100644 --- a/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h +++ b/dbms/src/Dictionaries/RangeDictionaryBlockInputStream.h @@ -87,16 +87,16 @@ private: template RangeDictionaryBlockInputStream::RangeDictionaryBlockInputStream( - DictionaryPtr dictionary, - size_t max_block_size, - const Names & column_names, - PaddedPODArray && ids, + DictionaryPtr dictionary_, + size_t max_block_size_, + const Names & column_names_, + PaddedPODArray && ids_, PaddedPODArray && block_start_dates, PaddedPODArray && block_end_dates) - : DictionaryBlockInputStreamBase(ids.size(), max_block_size) - , dictionary(dictionary) - , column_names(column_names) - , ids(std::move(ids)) + : DictionaryBlockInputStreamBase(ids_.size(), max_block_size_) + , dictionary(dictionary_) + , column_names(column_names_) + , ids(std::move(ids_)) , start_dates(std::move(block_start_dates)) , end_dates(std::move(block_end_dates)) { @@ -231,49 +231,49 @@ Block RangeDictionaryBlockInputStream::fillBlock column = getColumnFromAttribute(&DictionaryType::get##TYPE, ids_to_fill, date_key, attribute, *dictionary) switch (attribute.underlying_type) { - case AttributeUnderlyingType::UInt8: + case 
AttributeUnderlyingType::utUInt8: GET_COLUMN_FORM_ATTRIBUTE(UInt8); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: GET_COLUMN_FORM_ATTRIBUTE(UInt16); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: GET_COLUMN_FORM_ATTRIBUTE(UInt32); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: GET_COLUMN_FORM_ATTRIBUTE(UInt64); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: GET_COLUMN_FORM_ATTRIBUTE(UInt128); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: GET_COLUMN_FORM_ATTRIBUTE(Int8); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: GET_COLUMN_FORM_ATTRIBUTE(Int16); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: GET_COLUMN_FORM_ATTRIBUTE(Int32); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: GET_COLUMN_FORM_ATTRIBUTE(Int64); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: GET_COLUMN_FORM_ATTRIBUTE(Float32); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: GET_COLUMN_FORM_ATTRIBUTE(Float64); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: GET_COLUMN_FORM_ATTRIBUTE(Decimal32); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: GET_COLUMN_FORM_ATTRIBUTE(Decimal64); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: GET_COLUMN_FORM_ATTRIBUTE(Decimal128); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: column = getColumnFromAttributeString(ids_to_fill, date_key, attribute, *dictionary); break; } diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.cpp b/dbms/src/Dictionaries/RangeHashedDictionary.cpp 
index 05f29e05c42..ab67ce59371 100644 --- a/dbms/src/Dictionaries/RangeHashedDictionary.cpp +++ b/dbms/src/Dictionaries/RangeHashedDictionary.cpp @@ -68,16 +68,16 @@ bool operator<(const RangeHashedDictionary::Range & left, const RangeHashedDicti RangeHashedDictionary::RangeHashedDictionary( - const std::string & dictionary_name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty) - : dictionary_name{dictionary_name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & dictionary_name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_) + : dictionary_name{dictionary_name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) { createAttributes(); loadData(); @@ -92,7 +92,7 @@ RangeHashedDictionary::RangeHashedDictionary( const PaddedPODArray & dates, \ ResultArrayType & out) const \ { \ - const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::TYPE); \ + const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::ut##TYPE); \ getItems(attribute, ids, dates, out); \ } DECLARE_MULTIPLE_GETTER(UInt8) @@ -117,7 +117,7 @@ void RangeHashedDictionary::getString( const PaddedPODArray & dates, ColumnString * out) const { - const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::String); + const auto & attribute = getAttributeWithType(attribute_name, AttributeUnderlyingType::utString); const auto & attr = *std::get>(attribute.maps); const auto & null_value = std::get(attribute.null_values); @@ -227,51 +227,51 @@ void RangeHashedDictionary::calculateBytesAllocated() { switch (attribute.type) { - case 
AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -296,51 +296,51 @@ RangeHashedDictionary::createAttributeWithType(const AttributeUnderlyingType typ switch (type) { - case AttributeUnderlyingType::UInt8: + 
case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps = std::make_unique>(); @@ -363,7 +363,7 @@ void RangeHashedDictionary::getItems( if (false) { } 
-#define DISPATCH(TYPE) else if (attribute.type == AttributeUnderlyingType::TYPE) getItemsImpl(attribute, ids, dates, out); +#define DISPATCH(TYPE) else if (attribute.type == AttributeUnderlyingType::ut##TYPE) getItemsImpl(attribute, ids, dates, out); DISPATCH(UInt8) DISPATCH(UInt16) DISPATCH(UInt32) @@ -443,51 +443,51 @@ void RangeHashedDictionary::setAttributeValue(Attribute & attribute, const Key i { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: 
setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: setAttributeValueImpl(attribute, id, range, value.get()); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { auto & map = *std::get>(attribute.maps); const auto & string = value.get(); @@ -544,50 +544,50 @@ void RangeHashedDictionary::getIdsAndDates( switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Int64: + case 
AttributeUnderlyingType::utInt64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: getIdsAndDates(attribute, ids, start_dates, end_dates); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: getIdsAndDates(attribute, ids, start_dates, end_dates); break; } diff --git a/dbms/src/Dictionaries/RangeHashedDictionary.h b/dbms/src/Dictionaries/RangeHashedDictionary.h index a02b1377db5..6e03fc30720 100644 --- a/dbms/src/Dictionaries/RangeHashedDictionary.h +++ b/dbms/src/Dictionaries/RangeHashedDictionary.h @@ -18,11 +18,11 @@ class RangeHashedDictionary final : public IDictionaryBase { public: RangeHashedDictionary( - const std::string & dictionary_name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty); + const std::string & dictionary_name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_); std::string getName() const override { return dictionary_name; } diff --git a/dbms/src/Dictionaries/TrieDictionary.cpp b/dbms/src/Dictionaries/TrieDictionary.cpp index ac11272145e..8ab7d2f34af 100644 --- a/dbms/src/Dictionaries/TrieDictionary.cpp +++ 
b/dbms/src/Dictionaries/TrieDictionary.cpp @@ -35,16 +35,16 @@ namespace ErrorCodes } TrieDictionary::TrieDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime dict_lifetime, - bool require_nonempty) - : name{name} - , dict_struct(dict_struct) - , source_ptr{std::move(source_ptr)} - , dict_lifetime(dict_lifetime) - , require_nonempty(require_nonempty) + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_) + : name{name_} + , dict_struct(dict_struct_) + , source_ptr{std::move(source_ptr_)} + , dict_lifetime(dict_lifetime_) + , require_nonempty(require_nonempty_) , logger(&Poco::Logger::get("TrieDictionary")) { createAttributes(); @@ -75,7 +75,7 @@ TrieDictionary::~TrieDictionary() validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ const auto null_value = std::get(attribute.null_values); \ \ @@ -107,7 +107,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); const auto & null_value = StringRef{std::get(attribute.null_values)}; @@ -129,7 +129,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, 
\ @@ -163,7 +163,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -183,7 +183,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); \ \ const auto & attribute = getAttribute(attribute_name); \ - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::TYPE); \ + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::ut##TYPE); \ \ getItemsImpl( \ attribute, key_columns, [&](const size_t row, const auto value) { out[row] = value; }, [&](const size_t) { return def; }); \ @@ -214,7 +214,7 @@ void TrieDictionary::getString( validateKeyTypes(key_types); const auto & attribute = getAttribute(attribute_name); - checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::String); + checkAttributeType(name, attribute_name, attribute.type, AttributeUnderlyingType::utString); getItemsImpl( attribute, @@ -231,50 +231,50 @@ void TrieDictionary::has(const Columns & key_columns, const DataTypes & key_type switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: 
has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: has(attribute, key_columns, out); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: has(attribute, key_columns, out); break; } @@ -356,51 +356,51 @@ void TrieDictionary::calculateBytesAllocated() { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: addAttributeSize(attribute); break; - case 
AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: addAttributeSize(attribute); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: addAttributeSize(attribute); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { addAttributeSize(attribute); bytes_allocated += sizeof(Arena) + attribute.string_arena->size(); @@ -438,51 +438,51 @@ TrieDictionary::Attribute TrieDictionary::createAttributeWithType(const Attribut switch (type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: createAttributeImpl(attr, 
null_value); break; - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: createAttributeImpl(attr, null_value); break; - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { attr.null_values = null_value.get(); attr.maps.emplace>(); @@ -575,37 +575,37 @@ bool TrieDictionary::setAttributeValue(Attribute & attribute, const StringRef ke { switch (attribute.type) { - case AttributeUnderlyingType::UInt8: + case AttributeUnderlyingType::utUInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt16: + case AttributeUnderlyingType::utUInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt32: + case AttributeUnderlyingType::utUInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt64: + case AttributeUnderlyingType::utUInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::UInt128: + case AttributeUnderlyingType::utUInt128: return 
setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int8: + case AttributeUnderlyingType::utInt8: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int16: + case AttributeUnderlyingType::utInt16: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int32: + case AttributeUnderlyingType::utInt32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Int64: + case AttributeUnderlyingType::utInt64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float32: + case AttributeUnderlyingType::utFloat32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Float64: + case AttributeUnderlyingType::utFloat64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal32: + case AttributeUnderlyingType::utDecimal32: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal64: + case AttributeUnderlyingType::utDecimal64: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::Decimal128: + case AttributeUnderlyingType::utDecimal128: return setAttributeValueImpl(attribute, key, value.get()); - case AttributeUnderlyingType::String: + case AttributeUnderlyingType::utString: { const auto & string = value.get(); const auto string_in_arena = attribute.string_arena->insert(string.data(), string.size()); diff --git a/dbms/src/Dictionaries/TrieDictionary.h b/dbms/src/Dictionaries/TrieDictionary.h index a873f7bdd16..18b1b1c79b9 100644 --- a/dbms/src/Dictionaries/TrieDictionary.h +++ b/dbms/src/Dictionaries/TrieDictionary.h @@ -23,11 +23,11 @@ class TrieDictionary final : public IDictionaryBase { public: TrieDictionary( - const std::string & name, - const DictionaryStructure & dict_struct, - DictionarySourcePtr source_ptr, - const DictionaryLifetime 
dict_lifetime, - bool require_nonempty); + const std::string & name_, + const DictionaryStructure & dict_struct_, + DictionarySourcePtr source_ptr_, + const DictionaryLifetime dict_lifetime_, + bool require_nonempty_); ~TrieDictionary() override; diff --git a/dbms/src/Dictionaries/XDBCDictionarySource.cpp b/dbms/src/Dictionaries/XDBCDictionarySource.cpp index 243d8213f0b..627092844ec 100644 --- a/dbms/src/Dictionaries/XDBCDictionarySource.cpp +++ b/dbms/src/Dictionaries/XDBCDictionarySource.cpp @@ -40,8 +40,8 @@ namespace const Context & context, UInt64 max_block_size, const ConnectionTimeouts & timeouts, - const String name) - : name(name) + const String name_) + : name(name_) { read_buf = std::make_unique(uri, Poco::Net::HTTPRequest::HTTP_POST, callback, timeouts); reader diff --git a/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp b/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp index 2335363db70..fc38b476e0b 100644 --- a/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp +++ b/dbms/src/Formats/BlockInputStreamFromRowInputStream.cpp @@ -76,15 +76,15 @@ Block BlockInputStreamFromRowInputStream::readImpl() try { ++total_rows; - RowReadExtension info; - if (!row_input->read(columns, info)) + RowReadExtension info_; + if (!row_input->read(columns, info_)) break; if (read_virtual_columns_callback) read_virtual_columns_callback(); - for (size_t column_idx = 0; column_idx < info.read_columns.size(); ++column_idx) + for (size_t column_idx = 0; column_idx < info_.read_columns.size(); ++column_idx) { - if (!info.read_columns[column_idx]) + if (!info_.read_columns[column_idx]) { size_t column_size = columns[column_idx]->size(); if (column_size == 0) diff --git a/dbms/src/Formats/CSVRowInputStream.cpp b/dbms/src/Formats/CSVRowInputStream.cpp index 635fef82cd0..662e6306e25 100644 --- a/dbms/src/Formats/CSVRowInputStream.cpp +++ b/dbms/src/Formats/CSVRowInputStream.cpp @@ -92,8 +92,8 @@ static void skipRow(ReadBuffer & istr, const FormatSettings::CSV & 
settings, siz } -CSVRowInputStream::CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings) - : istr(istr_), header(header_), with_names(with_names_), format_settings(format_settings) +CSVRowInputStream::CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings_) + : istr(istr_), header(header_), with_names(with_names_), format_settings(format_settings_) { const auto num_columns = header.columns(); diff --git a/dbms/src/Formats/CSVRowInputStream.h b/dbms/src/Formats/CSVRowInputStream.h index 6cb0fe8e82f..b398858ee78 100644 --- a/dbms/src/Formats/CSVRowInputStream.h +++ b/dbms/src/Formats/CSVRowInputStream.h @@ -21,7 +21,7 @@ class CSVRowInputStream : public IRowInputStream public: /** with_names - in the first line the header with column names */ - CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings); + CSVRowInputStream(ReadBuffer & istr_, const Block & header_, bool with_names_, const FormatSettings & format_settings_); bool read(MutableColumns & columns, RowReadExtension & ext) override; void readPrefix() override; diff --git a/dbms/src/Formats/MySQLBlockInputStream.cpp b/dbms/src/Formats/MySQLBlockInputStream.cpp index 1eeb0981afd..1896dbcc4b6 100644 --- a/dbms/src/Formats/MySQLBlockInputStream.cpp +++ b/dbms/src/Formats/MySQLBlockInputStream.cpp @@ -20,8 +20,8 @@ namespace ErrorCodes MySQLBlockInputStream::MySQLBlockInputStream( - const mysqlxx::PoolWithFailover::Entry & entry, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size, const bool auto_close) - : entry{entry}, query{this->entry->query(query_str)}, result{query.use()}, max_block_size{max_block_size}, auto_close{auto_close} + const mysqlxx::PoolWithFailover::Entry & entry_, const std::string & query_str, const Block & sample_block, const UInt64 max_block_size_, const bool 
auto_close_) + : entry{entry_}, query{this->entry->query(query_str)}, result{query.use()}, max_block_size{max_block_size_}, auto_close{auto_close_} { if (sample_block.columns() != result.getNumFields()) throw Exception{"mysqlxx::UseQueryResult contains " + toString(result.getNumFields()) + " columns while " @@ -40,46 +40,46 @@ namespace { switch (type) { - case ValueType::UInt8: + case ValueType::vtUInt8: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt16: + case ValueType::vtUInt16: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt32: + case ValueType::vtUInt32: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::UInt64: + case ValueType::vtUInt64: static_cast(column).insertValue(value.getUInt()); break; - case ValueType::Int8: + case ValueType::vtInt8: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int16: + case ValueType::vtInt16: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int32: + case ValueType::vtInt32: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Int64: + case ValueType::vtInt64: static_cast(column).insertValue(value.getInt()); break; - case ValueType::Float32: + case ValueType::vtFloat32: static_cast(column).insertValue(value.getDouble()); break; - case ValueType::Float64: + case ValueType::vtFloat64: static_cast(column).insertValue(value.getDouble()); break; - case ValueType::String: + case ValueType::vtString: static_cast(column).insertData(value.data(), value.size()); break; - case ValueType::Date: + case ValueType::vtDate: static_cast(column).insertValue(UInt16(value.getDate().getDayNum())); break; - case ValueType::DateTime: + case ValueType::vtDateTime: static_cast(column).insertValue(UInt32(value.getDateTime())); break; - case ValueType::UUID: + case ValueType::vtUUID: static_cast(column).insert(parse(value.data(), value.size())); break; } diff --git 
a/dbms/src/Formats/MySQLBlockInputStream.h b/dbms/src/Formats/MySQLBlockInputStream.h index bba523ddab7..238994acbd8 100644 --- a/dbms/src/Formats/MySQLBlockInputStream.h +++ b/dbms/src/Formats/MySQLBlockInputStream.h @@ -15,11 +15,11 @@ class MySQLBlockInputStream final : public IBlockInputStream { public: MySQLBlockInputStream( - const mysqlxx::PoolWithFailover::Entry & entry, + const mysqlxx::PoolWithFailover::Entry & entry_, const std::string & query_str, const Block & sample_block, - const UInt64 max_block_size, - const bool auto_close = false); + const UInt64 max_block_size_, + const bool auto_close_ = false); String getName() const override { return "MySQL"; } diff --git a/dbms/src/Formats/ProtobufReader.h b/dbms/src/Formats/ProtobufReader.h index c2660369c67..d848215c294 100644 --- a/dbms/src/Formats/ProtobufReader.h +++ b/dbms/src/Formats/ProtobufReader.h @@ -231,7 +231,7 @@ public: bool readDecimal(Decimal64 &, UInt32, UInt32) { return false; } bool readDecimal(Decimal128 &, UInt32, UInt32) { return false; } bool readAggregateFunction(const AggregateFunctionPtr &, AggregateDataPtr, Arena &) { return false; } - bool maybeCanReadValue() const { return false; } + bool canReadMoreValues() const { return false; } }; } diff --git a/dbms/src/Formats/ProtobufWriter.h b/dbms/src/Formats/ProtobufWriter.h index f11fbcbc391..6e50e023532 100644 --- a/dbms/src/Formats/ProtobufWriter.h +++ b/dbms/src/Formats/ProtobufWriter.h @@ -108,7 +108,7 @@ private: { size_t start; size_t end; - Piece(size_t start, size_t end) : start(start), end(end) {} + Piece(size_t start_, size_t end_) : start(start_), end(end_) {} Piece() = default; }; @@ -116,8 +116,8 @@ private: { size_t num_pieces_at_start; size_t num_bytes_skipped_at_start; - NestedInfo(size_t num_pieces_at_start, size_t num_bytes_skipped_at_start) - : num_pieces_at_start(num_pieces_at_start), num_bytes_skipped_at_start(num_bytes_skipped_at_start) + NestedInfo(size_t num_pieces_at_start_, size_t num_bytes_skipped_at_start_) 
+ : num_pieces_at_start(num_pieces_at_start_), num_bytes_skipped_at_start(num_bytes_skipped_at_start_) { } }; diff --git a/dbms/src/Formats/TabSeparatedRowInputStream.cpp b/dbms/src/Formats/TabSeparatedRowInputStream.cpp index 0c16c14e306..69850dbc455 100644 --- a/dbms/src/Formats/TabSeparatedRowInputStream.cpp +++ b/dbms/src/Formats/TabSeparatedRowInputStream.cpp @@ -48,8 +48,8 @@ static void checkForCarriageReturn(ReadBuffer & istr) TabSeparatedRowInputStream::TabSeparatedRowInputStream( - ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings) - : istr(istr_), header(header_), with_names(with_names_), with_types(with_types_), format_settings(format_settings) + ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : istr(istr_), header(header_), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { const auto num_columns = header.columns(); diff --git a/dbms/src/Formats/TabSeparatedRowInputStream.h b/dbms/src/Formats/TabSeparatedRowInputStream.h index 3a0ed13c1bd..f8ebebbdfe4 100644 --- a/dbms/src/Formats/TabSeparatedRowInputStream.h +++ b/dbms/src/Formats/TabSeparatedRowInputStream.h @@ -23,7 +23,7 @@ public: * with_types - on the next line header with type names */ TabSeparatedRowInputStream( - ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings); + ReadBuffer & istr_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_); bool read(MutableColumns & columns, RowReadExtension & ext) override; void readPrefix() override; diff --git a/dbms/src/Functions/FunctionJoinGet.h b/dbms/src/Functions/FunctionJoinGet.h index 81f54b84333..9885b05657d 100644 --- a/dbms/src/Functions/FunctionJoinGet.h +++ b/dbms/src/Functions/FunctionJoinGet.h @@ -14,12 +14,12 @@ public: static constexpr auto name = 
"joinGet"; FunctionJoinGet( - TableStructureReadLockHolder table_lock, StoragePtr storage_join, JoinPtr join, const String & attr_name, DataTypePtr return_type) - : table_lock(std::move(table_lock)) - , storage_join(std::move(storage_join)) - , join(std::move(join)) - , attr_name(attr_name) - , return_type(std::move(return_type)) + TableStructureReadLockHolder table_lock_, StoragePtr storage_join_, JoinPtr join_, const String & attr_name_, DataTypePtr return_type_) + : table_lock(std::move(table_lock_)) + , storage_join(std::move(storage_join_)) + , join(std::move(join_)) + , attr_name(attr_name_) + , return_type(std::move(return_type_)) { } @@ -47,7 +47,7 @@ public: static constexpr auto name = "joinGet"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderJoinGet(const Context & context) : context(context) {} + FunctionBuilderJoinGet(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionMathUnary.h b/dbms/src/Functions/FunctionMathUnary.h index 363951510fe..caa61d76758 100644 --- a/dbms/src/Functions/FunctionMathUnary.h +++ b/dbms/src/Functions/FunctionMathUnary.h @@ -31,7 +31,7 @@ #endif -/** FastOps is a fast vector math library from Michael Parakhin (former Yandex CTO), +/** FastOps is a fast vector math library from Mikhail Parakhin (former Yandex CTO), * Enabled by default. 
*/ #if USE_FASTOPS diff --git a/dbms/src/Functions/FunctionsBitmap.cpp b/dbms/src/Functions/FunctionsBitmap.cpp index d4367ae2c64..b2bb1e4f25a 100644 --- a/dbms/src/Functions/FunctionsBitmap.cpp +++ b/dbms/src/Functions/FunctionsBitmap.cpp @@ -9,6 +9,7 @@ void registerFunctionsBitmap(FunctionFactory & factory) { factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); diff --git a/dbms/src/Functions/FunctionsBitmap.h b/dbms/src/Functions/FunctionsBitmap.h index f64f04789cc..22964605b8b 100644 --- a/dbms/src/Functions/FunctionsBitmap.h +++ b/dbms/src/Functions/FunctionsBitmap.h @@ -30,6 +30,9 @@ namespace ErrorCodes * Convert bitmap to integer array: * bitmapToArray: bitmap -> integer[] * + * Return subset in specified range (not include the range_end): + * bitmapSubsetInRange: bitmap,integer,integer -> bitmap + * * Two bitmap and calculation: * bitmapAnd: bitmap,bitmap -> bitmap * @@ -240,6 +243,119 @@ private: } }; +class FunctionBitmapSubsetInRange : public IFunction +{ +public: + static constexpr auto name = "bitmapSubsetInRange"; + + static FunctionPtr create(const Context &) { return std::make_shared(); } + + String getName() const override { return name; } + + bool isVariadic() const override { return false; } + + size_t getNumberOfArguments() const override { return 3; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + const DataTypeAggregateFunction * bitmap_type = typeid_cast(arguments[0].get()); + if (!(bitmap_type && bitmap_type->getFunctionName() == AggregateFunctionGroupBitmapData::name())) + throw Exception( + "First argument for function " + getName() + " must be an bitmap but it has type " + arguments[0]->getName() + ".", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + auto arg_type1 = typeid_cast *>(arguments[1].get()); + if (!(arg_type1)) + throw Exception( + "Second argument for function " + getName() + " must be UInt32 but it has 
type " + arguments[1]->getName() + ".", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + auto arg_type2 = typeid_cast *>(arguments[1].get()); + if (!(arg_type2)) + throw Exception( + "Third argument for function " + getName() + " must be UInt32 but it has type " + arguments[2]->getName() + ".", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + return arguments[0]; + } + + bool useDefaultImplementationForConstants() const override { return true; } + + void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override + { + const IDataType * from_type = block.getByPosition(arguments[0]).type.get(); + const DataTypeAggregateFunction * aggr_type = typeid_cast(from_type); + WhichDataType which(aggr_type->getArgumentsDataTypes()[0]); + if (which.isUInt8()) + executeIntType(block, arguments, result, input_rows_count); + else if (which.isUInt16()) + executeIntType(block, arguments, result, input_rows_count); + else if (which.isUInt32()) + executeIntType(block, arguments, result, input_rows_count); + else if (which.isUInt64()) + executeIntType(block, arguments, result, input_rows_count); + else + throw Exception( + "Unexpected type " + from_type->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + } + +private: + using ToType = UInt64; + + template + void executeIntType( + Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) + const + { + const IColumn * columns[3]; + bool is_column_const[3]; + const ColumnAggregateFunction * colAggFunc; + const PaddedPODArray * container0; + const PaddedPODArray * container1, * container2; + + for (size_t i = 0; i < 3; ++i) + { + columns[i] = block.getByPosition(arguments[i]).column.get(); + is_column_const[i] = isColumnConst(*columns[i]); + } + if (is_column_const[0]) + { + colAggFunc = typeid_cast(typeid_cast(columns[0])->getDataColumnPtr().get()); + } + else + { + colAggFunc = typeid_cast(columns[0]); + } + container0 = 
&colAggFunc->getData(); + if (is_column_const[1]) + container1 = &typeid_cast(typeid_cast(columns[1])->getDataColumnPtr().get())->getData(); + else + container1 = &typeid_cast(columns[1])->getData(); + if (is_column_const[2]) + container2 = &typeid_cast(typeid_cast(columns[2])->getDataColumnPtr().get())->getData(); + else + container2 = &typeid_cast(columns[2])->getData(); + + auto col_to = ColumnAggregateFunction::create(colAggFunc->getAggregateFunction()); + col_to->reserve(input_rows_count); + + for (size_t i = 0; i < input_rows_count; ++i) + { + const AggregateDataPtr dataPtr0 = is_column_const[0] ? (*container0)[0] : (*container0)[i]; + const AggregateFunctionGroupBitmapData& bd0 + = *reinterpret_cast*>(dataPtr0); + const UInt32 range_start = is_column_const[1] ? (*container1)[0] : (*container1)[i]; + const UInt32 range_end = is_column_const[2] ? (*container2)[0] : (*container2)[i]; + + auto bd2 = new AggregateFunctionGroupBitmapData(); + bd0.rbs.rb_range(range_start, range_end, bd2->rbs); + + col_to->insertFrom(reinterpret_cast(bd2)); + } + block.getByPosition(result).column = std::move(col_to); + } +}; + template class FunctionBitmapSelfCardinalityImpl : public IFunction { diff --git a/dbms/src/Functions/FunctionsComparison.h b/dbms/src/Functions/FunctionsComparison.h index 2da3e0a8970..c3d29339d55 100644 --- a/dbms/src/Functions/FunctionsComparison.h +++ b/dbms/src/Functions/FunctionsComparison.h @@ -554,8 +554,8 @@ public: static constexpr auto name = Name::name; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionComparison(const Context & context) - : context(context), + FunctionComparison(const Context & context_) + : context(context_), check_decimal_overflow(decimalCheckComparisonOverflow(context)) {} diff --git a/dbms/src/Functions/FunctionsConversion.h b/dbms/src/Functions/FunctionsConversion.h index b0fcc0103d6..f903210ef4f 100644 --- a/dbms/src/Functions/FunctionsConversion.h +++ 
b/dbms/src/Functions/FunctionsConversion.h @@ -1183,7 +1183,7 @@ struct UnknownMonotonicity }; template -struct ToIntMonotonicity +struct ToNumberMonotonicity { static bool has() { return true; } @@ -1358,21 +1358,21 @@ struct NameToFloat32 { static constexpr auto name = "toFloat32"; }; struct NameToFloat64 { static constexpr auto name = "toFloat64"; }; struct NameToUUID { static constexpr auto name = "toUUID"; }; -using FunctionToUInt8 = FunctionConvert>; -using FunctionToUInt16 = FunctionConvert>; -using FunctionToUInt32 = FunctionConvert>; -using FunctionToUInt64 = FunctionConvert>; -using FunctionToInt8 = FunctionConvert>; -using FunctionToInt16 = FunctionConvert>; -using FunctionToInt32 = FunctionConvert>; -using FunctionToInt64 = FunctionConvert>; -using FunctionToFloat32 = FunctionConvert; -using FunctionToFloat64 = FunctionConvert; -using FunctionToDate = FunctionConvert>; -using FunctionToDateTime = FunctionConvert>; -using FunctionToUUID = FunctionConvert>; +using FunctionToUInt8 = FunctionConvert>; +using FunctionToUInt16 = FunctionConvert>; +using FunctionToUInt32 = FunctionConvert>; +using FunctionToUInt64 = FunctionConvert>; +using FunctionToInt8 = FunctionConvert>; +using FunctionToInt16 = FunctionConvert>; +using FunctionToInt32 = FunctionConvert>; +using FunctionToInt64 = FunctionConvert>; +using FunctionToFloat32 = FunctionConvert>; +using FunctionToFloat64 = FunctionConvert>; +using FunctionToDate = FunctionConvert>; +using FunctionToDateTime = FunctionConvert>; +using FunctionToUUID = FunctionConvert>; using FunctionToString = FunctionConvert; -using FunctionToUnixTimestamp = FunctionConvert>; +using FunctionToUnixTimestamp = FunctionConvert>; using FunctionToDecimal32 = FunctionConvert, NameToDecimal32, UnknownMonotonicity>; using FunctionToDecimal64 = FunctionConvert, NameToDecimal64, UnknownMonotonicity>; using FunctionToDecimal128 = FunctionConvert, NameToDecimal128, UnknownMonotonicity>; @@ -1485,8 +1485,8 @@ class PreparedFunctionCast : 
public PreparedFunctionImpl public: using WrapperType = std::function; - explicit PreparedFunctionCast(WrapperType && wrapper_function, const char * name) - : wrapper_function(std::move(wrapper_function)), name(name) {} + explicit PreparedFunctionCast(WrapperType && wrapper_function_, const char * name_) + : wrapper_function(std::move(wrapper_function_)), name(name_) {} String getName() const override { return name; } @@ -1520,10 +1520,10 @@ public: using WrapperType = std::function; using MonotonicityForRange = std::function; - FunctionCast(const Context & context, const char * name, MonotonicityForRange && monotonicity_for_range - , const DataTypes & argument_types, const DataTypePtr & return_type) - : context(context), name(name), monotonicity_for_range(monotonicity_for_range) - , argument_types(argument_types), return_type(return_type) + FunctionCast(const Context & context_, const char * name_, MonotonicityForRange && monotonicity_for_range_ + , const DataTypes & argument_types_, const DataTypePtr & return_type_) + : context(context_), name(name_), monotonicity_for_range(monotonicity_for_range_) + , argument_types(argument_types_), return_type(return_type_) { } @@ -2164,7 +2164,7 @@ public: static constexpr auto name = "CAST"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderCast(const Context & context) : context(context) {} + FunctionBuilderCast(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsExternalDictionaries.h b/dbms/src/Functions/FunctionsExternalDictionaries.h index 877d3e6f0f2..d86bc291212 100644 --- a/dbms/src/Functions/FunctionsExternalDictionaries.h +++ b/dbms/src/Functions/FunctionsExternalDictionaries.h @@ -74,7 +74,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictHas(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + 
FunctionDictHas(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -219,7 +219,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetString(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetString(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -414,7 +414,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetStringOrDefault(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetStringOrDefault(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -729,8 +729,8 @@ public: return std::make_shared(context.getExternalDictionaries(), dec_scale); } - FunctionDictGet(const ExternalDictionaries & dictionaries, UInt32 dec_scale = 0) - : dictionaries(dictionaries) + FunctionDictGet(const ExternalDictionaries & dictionaries_, UInt32 dec_scale = 0) + : dictionaries(dictionaries_) , decimal_scale(dec_scale) {} @@ -1000,8 +1000,8 @@ public: return std::make_shared(context.getExternalDictionaries(), dec_scale); } - FunctionDictGetOrDefault(const ExternalDictionaries & dictionaries, UInt32 dec_scale = 0) - : dictionaries(dictionaries) + FunctionDictGetOrDefault(const ExternalDictionaries & dictionaries_, UInt32 dec_scale = 0) + : dictionaries(dictionaries_) , decimal_scale(dec_scale) {} @@ -1290,7 +1290,7 @@ public: return std::make_shared(context.getExternalDictionaries(), context); } - FunctionDictGetNoType(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {} + FunctionDictGetNoType(const ExternalDictionaries & dictionaries_, const Context & context_) : dictionaries(dictionaries_), context(context_) {} String getName() const 
override { return name; } @@ -1439,7 +1439,7 @@ public: return std::make_shared(context.getExternalDictionaries(), context); } - FunctionDictGetNoTypeOrDefault(const ExternalDictionaries & dictionaries, const Context & context) : dictionaries(dictionaries), context(context) {} + FunctionDictGetNoTypeOrDefault(const ExternalDictionaries & dictionaries_, const Context & context_) : dictionaries(dictionaries_), context(context_) {} String getName() const override { return name; } @@ -1582,7 +1582,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictGetHierarchy(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictGetHierarchy(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } @@ -1739,7 +1739,7 @@ public: return std::make_shared(context.getExternalDictionaries()); } - FunctionDictIsIn(const ExternalDictionaries & dictionaries) : dictionaries(dictionaries) {} + FunctionDictIsIn(const ExternalDictionaries & dictionaries_) : dictionaries(dictionaries_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsExternalModels.h b/dbms/src/Functions/FunctionsExternalModels.h index ab193e0a2bc..210729db478 100644 --- a/dbms/src/Functions/FunctionsExternalModels.h +++ b/dbms/src/Functions/FunctionsExternalModels.h @@ -17,7 +17,7 @@ public: static FunctionPtr create(const Context & context); - explicit FunctionModelEvaluate(const ExternalModels & models) : models(models) {} + explicit FunctionModelEvaluate(const ExternalModels & models_) : models(models_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/FunctionsMiscellaneous.h b/dbms/src/Functions/FunctionsMiscellaneous.h index 6803e16abbe..96539f9559f 100644 --- a/dbms/src/Functions/FunctionsMiscellaneous.h +++ b/dbms/src/Functions/FunctionsMiscellaneous.h @@ -16,11 +16,11 @@ class FunctionExpression : public 
IFunctionBase, public IPreparedFunction, public std::enable_shared_from_this { public: - FunctionExpression(const ExpressionActionsPtr & expression_actions, - const DataTypes & argument_types, const Names & argument_names, - const DataTypePtr & return_type, const std::string & return_name) - : expression_actions(expression_actions), argument_types(argument_types), - argument_names(argument_names), return_type(return_type), return_name(return_name) + FunctionExpression(const ExpressionActionsPtr & expression_actions_, + const DataTypes & argument_types_, const Names & argument_names_, + const DataTypePtr & return_type_, const std::string & return_name_) + : expression_actions(expression_actions_), argument_types(argument_types_), + argument_names(argument_names_), return_type(return_type_), return_name(return_name_) { } @@ -65,11 +65,11 @@ class FunctionCapture : public IFunctionBase, public IPreparedFunction, public F public std::enable_shared_from_this { public: - FunctionCapture(const ExpressionActionsPtr & expression_actions, const Names & captured, - const NamesAndTypesList & lambda_arguments, - const DataTypePtr & function_return_type, const std::string & expression_return_name) - : expression_actions(expression_actions), captured_names(captured), lambda_arguments(lambda_arguments) - , function_return_type(function_return_type), expression_return_name(expression_return_name) + FunctionCapture(const ExpressionActionsPtr & expression_actions_, const Names & captured, + const NamesAndTypesList & lambda_arguments_, + const DataTypePtr & function_return_type_, const std::string & expression_return_name_) + : expression_actions(expression_actions_), captured_names(captured), lambda_arguments(lambda_arguments_) + , function_return_type(function_return_type_), expression_return_name(expression_return_name_) { const auto & all_arguments = expression_actions->getRequiredColumnsWithTypes(); diff --git a/dbms/src/Functions/FunctionsRound.h 
b/dbms/src/Functions/FunctionsRound.h index 38b72274f5a..bc276435cc8 100644 --- a/dbms/src/Functions/FunctionsRound.h +++ b/dbms/src/Functions/FunctionsRound.h @@ -574,7 +574,7 @@ class FunctionRoundDown : public IFunction public: static constexpr auto name = "roundDown"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionRoundDown(const Context & context) : context(context) {} + FunctionRoundDown(const Context & context_) : context(context_) {} public: String getName() const override { return name; } diff --git a/dbms/src/Functions/GatherUtils/Sinks.h b/dbms/src/Functions/GatherUtils/Sinks.h index cd454f7ca2e..c6925fab865 100644 --- a/dbms/src/Functions/GatherUtils/Sinks.h +++ b/dbms/src/Functions/GatherUtils/Sinks.h @@ -196,8 +196,8 @@ struct NullableArraySink : public ArraySink NullMap & null_map; - NullableArraySink(ColumnArray & arr, NullMap & null_map, size_t column_size) - : ArraySink(arr, column_size), null_map(null_map) + NullableArraySink(ColumnArray & arr, NullMap & null_map_, size_t column_size) + : ArraySink(arr, column_size), null_map(null_map_) { } diff --git a/dbms/src/Functions/GatherUtils/Sources.h b/dbms/src/Functions/GatherUtils/Sources.h index 41e1a7a0b84..d43dc69b2b0 100644 --- a/dbms/src/Functions/GatherUtils/Sources.h +++ b/dbms/src/Functions/GatherUtils/Sources.h @@ -585,8 +585,8 @@ struct NullableArraySource : public ArraySource const NullMap & null_map; - NullableArraySource(const ColumnArray & arr, const NullMap & null_map) - : ArraySource(arr), null_map(null_map) + NullableArraySource(const ColumnArray & arr, const NullMap & null_map_) + : ArraySource(arr), null_map(null_map_) { } @@ -743,7 +743,7 @@ struct NullableValueSource : public ValueSource const NullMap & null_map; template - explicit NullableValueSource(const Column & col, const NullMap & null_map) : ValueSource(col), null_map(null_map) {} + explicit NullableValueSource(const Column & col, const NullMap & null_map_) : 
ValueSource(col), null_map(null_map_) {} void accept(ValueSourceVisitor & visitor) override { visitor.visit(*this); } diff --git a/dbms/src/Functions/GeoUtils.h b/dbms/src/Functions/GeoUtils.h index 9c5ebf98b16..2191290d858 100644 --- a/dbms/src/Functions/GeoUtils.h +++ b/dbms/src/Functions/GeoUtils.h @@ -91,8 +91,8 @@ public: using Box = boost::geometry::model::box; using Segment = boost::geometry::model::segment; - explicit PointInPolygonWithGrid(const Polygon & polygon, UInt16 grid_size = 8) - : grid_size(std::max(1, grid_size)), polygon(polygon) {} + explicit PointInPolygonWithGrid(const Polygon & polygon_, UInt16 grid_size_ = 8) + : grid_size(std::max(1, grid_size_)), polygon(polygon_) {} void init(); @@ -510,7 +510,7 @@ public: using Polygon = boost::geometry::model::polygon; using Box = boost::geometry::model::box; - explicit PointInPolygon(const Polygon & polygon) : polygon(polygon) {} + explicit PointInPolygon(const Polygon & polygon_) : polygon(polygon_) {} void init() { diff --git a/dbms/src/Functions/IFunction.h b/dbms/src/Functions/IFunction.h index ef7e882e700..287e7a84170 100644 --- a/dbms/src/Functions/IFunction.h +++ b/dbms/src/Functions/IFunction.h @@ -408,7 +408,7 @@ protected: class DefaultExecutable final : public PreparedFunctionImpl { public: - explicit DefaultExecutable(std::shared_ptr function) : function(std::move(function)) {} + explicit DefaultExecutable(std::shared_ptr function_) : function(std::move(function_)) {} String getName() const override { return function->getName(); } @@ -434,8 +434,8 @@ private: class DefaultFunction final : public IFunctionBase { public: - DefaultFunction(std::shared_ptr function, DataTypes arguments, DataTypePtr return_type) - : function(std::move(function)), arguments(std::move(arguments)), return_type(std::move(return_type)) {} + DefaultFunction(std::shared_ptr function_, DataTypes arguments_, DataTypePtr return_type_) + : function(std::move(function_)), arguments(std::move(arguments_)), 
return_type(std::move(return_type_)) {} String getName() const override { return function->getName(); } @@ -478,7 +478,7 @@ private: class DefaultFunctionBuilder : public FunctionBuilderImpl { public: - explicit DefaultFunctionBuilder(std::shared_ptr function) : function(std::move(function)) {} + explicit DefaultFunctionBuilder(std::shared_ptr function_) : function(std::move(function_)) {} void checkNumberOfArguments(size_t number_of_arguments) const override { diff --git a/dbms/src/Functions/RapidJSONParser.h b/dbms/src/Functions/RapidJSONParser.h index c88d61fb69d..ff4ecd506fd 100644 --- a/dbms/src/Functions/RapidJSONParser.h +++ b/dbms/src/Functions/RapidJSONParser.h @@ -32,7 +32,7 @@ struct RapidJSONParser { public: Iterator() {} - Iterator(const rapidjson::Document & document) : value(&document) {} + Iterator(const rapidjson::Document & document_) : value(&document_) {} Iterator(const Iterator & src) : value(src.value) , is_object_member(src.is_object_member) diff --git a/dbms/src/Functions/array/array.cpp b/dbms/src/Functions/array/array.cpp index 6d641d13a69..0aa1f6f9bae 100644 --- a/dbms/src/Functions/array/array.cpp +++ b/dbms/src/Functions/array/array.cpp @@ -19,8 +19,8 @@ public: return std::make_shared(context); } - FunctionArray(const Context & context) - : context(context) + FunctionArray(const Context & context_) + : context(context_) { } diff --git a/dbms/src/Functions/array/arrayConcat.cpp b/dbms/src/Functions/array/arrayConcat.cpp index 42b92116bf2..32ba791ac5f 100644 --- a/dbms/src/Functions/array/arrayConcat.cpp +++ b/dbms/src/Functions/array/arrayConcat.cpp @@ -27,7 +27,7 @@ class FunctionArrayConcat : public IFunction public: static constexpr auto name = "arrayConcat"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayConcat(const Context & context) : context(context) {} + FunctionArrayConcat(const Context & context_) : context(context_) {} String getName() const override { return name; } 
diff --git a/dbms/src/Functions/array/arrayIntersect.cpp b/dbms/src/Functions/array/arrayIntersect.cpp index 4e88bbad920..b735d5497d6 100644 --- a/dbms/src/Functions/array/arrayIntersect.cpp +++ b/dbms/src/Functions/array/arrayIntersect.cpp @@ -35,7 +35,7 @@ class FunctionArrayIntersect : public IFunction public: static constexpr auto name = "arrayIntersect"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayIntersect(const Context & context) : context(context) {} + FunctionArrayIntersect(const Context & context_) : context(context_) {} String getName() const override { return name; } @@ -81,8 +81,8 @@ private: const DataTypePtr & data_type; ColumnPtr & result; - NumberExecutor(const UnpackedArrays & arrays, const DataTypePtr & data_type, ColumnPtr & result) - : arrays(arrays), data_type(data_type), result(result) {} + NumberExecutor(const UnpackedArrays & arrays_, const DataTypePtr & data_type_, ColumnPtr & result_) + : arrays(arrays_), data_type(data_type_), result(result_) {} template void operator()(); diff --git a/dbms/src/Functions/array/arrayPop.h b/dbms/src/Functions/array/arrayPop.h index 0336d2e20eb..f860dd4eede 100644 --- a/dbms/src/Functions/array/arrayPop.h +++ b/dbms/src/Functions/array/arrayPop.h @@ -18,7 +18,7 @@ namespace ErrorCodes class FunctionArrayPop : public IFunction { public: - FunctionArrayPop(bool pop_front, const char * name) : pop_front(pop_front), name(name) {} + FunctionArrayPop(bool pop_front_, const char * name_) : pop_front(pop_front_), name(name_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arrayPush.h b/dbms/src/Functions/array/arrayPush.h index 4d06571ea71..3b471987cb7 100644 --- a/dbms/src/Functions/array/arrayPush.h +++ b/dbms/src/Functions/array/arrayPush.h @@ -21,8 +21,8 @@ namespace ErrorCodes class FunctionArrayPush : public IFunction { public: - FunctionArrayPush(const Context & context, bool push_front, const char * name) 
- : context(context), push_front(push_front), name(name) {} + FunctionArrayPush(const Context & context_, bool push_front_, const char * name_) + : context(context_), push_front(push_front_), name(name_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arrayPushBack.cpp b/dbms/src/Functions/array/arrayPushBack.cpp index c5677cd3072..a9c4ed88a7a 100644 --- a/dbms/src/Functions/array/arrayPushBack.cpp +++ b/dbms/src/Functions/array/arrayPushBack.cpp @@ -10,7 +10,7 @@ class FunctionArrayPushBack : public FunctionArrayPush public: static constexpr auto name = "arrayPushBack"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayPushBack(const Context & context) : FunctionArrayPush(context, false, name) {} + FunctionArrayPushBack(const Context & context_) : FunctionArrayPush(context_, false, name) {} }; void registerFunctionArrayPushBack(FunctionFactory & factory) diff --git a/dbms/src/Functions/array/arrayPushFront.cpp b/dbms/src/Functions/array/arrayPushFront.cpp index 99172e0180c..e0cc56c8ae2 100644 --- a/dbms/src/Functions/array/arrayPushFront.cpp +++ b/dbms/src/Functions/array/arrayPushFront.cpp @@ -11,7 +11,7 @@ class FunctionArrayPushFront : public FunctionArrayPush public: static constexpr auto name = "arrayPushFront"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayPushFront(const Context & context) : FunctionArrayPush(context, true, name) {} + FunctionArrayPushFront(const Context & context_) : FunctionArrayPush(context_, true, name) {} }; diff --git a/dbms/src/Functions/array/arrayResize.cpp b/dbms/src/Functions/array/arrayResize.cpp index d4f37823e0b..201ee967b76 100644 --- a/dbms/src/Functions/array/arrayResize.cpp +++ b/dbms/src/Functions/array/arrayResize.cpp @@ -27,7 +27,7 @@ class FunctionArrayResize : public IFunction public: static constexpr auto name = "arrayResize"; static FunctionPtr create(const Context 
& context) { return std::make_shared(context); } - FunctionArrayResize(const Context & context) : context(context) {} + FunctionArrayResize(const Context & context_) : context(context_) {} String getName() const override { return name; } diff --git a/dbms/src/Functions/array/arraySort.cpp b/dbms/src/Functions/array/arraySort.cpp index 59e65602539..17a711e8902 100644 --- a/dbms/src/Functions/array/arraySort.cpp +++ b/dbms/src/Functions/array/arraySort.cpp @@ -23,7 +23,7 @@ struct ArraySortImpl { const IColumn & column; - Less(const IColumn & column) : column(column) {} + Less(const IColumn & column_) : column(column_) {} bool operator()(size_t lhs, size_t rhs) const { diff --git a/dbms/src/Functions/array/hasAll.cpp b/dbms/src/Functions/array/hasAll.cpp index 19278e3e78e..6ae1640e382 100644 --- a/dbms/src/Functions/array/hasAll.cpp +++ b/dbms/src/Functions/array/hasAll.cpp @@ -10,7 +10,7 @@ class FunctionArrayHasAll : public FunctionArrayHasAllAny public: static constexpr auto name = "hasAll"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayHasAll(const Context & context) : FunctionArrayHasAllAny(context, true, name) {} + FunctionArrayHasAll(const Context & context_) : FunctionArrayHasAllAny(context_, true, name) {} }; void registerFunctionHasAll(FunctionFactory & factory) diff --git a/dbms/src/Functions/array/hasAllAny.h b/dbms/src/Functions/array/hasAllAny.h index b688406fd91..ef69594d01c 100644 --- a/dbms/src/Functions/array/hasAllAny.h +++ b/dbms/src/Functions/array/hasAllAny.h @@ -27,8 +27,8 @@ namespace ErrorCodes class FunctionArrayHasAllAny : public IFunction { public: - FunctionArrayHasAllAny(const Context & context, bool all, const char * name) - : context(context), all(all), name(name) {} + FunctionArrayHasAllAny(const Context & context_, bool all_, const char * name_) + : context(context_), all(all_), name(name_) {} String getName() const override { return name; } diff --git 
a/dbms/src/Functions/array/hasAny.cpp b/dbms/src/Functions/array/hasAny.cpp index 08275e1ba8e..756e5311b50 100644 --- a/dbms/src/Functions/array/hasAny.cpp +++ b/dbms/src/Functions/array/hasAny.cpp @@ -10,7 +10,7 @@ class FunctionArrayHasAny : public FunctionArrayHasAllAny public: static constexpr auto name = "hasAny"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionArrayHasAny(const Context & context) : FunctionArrayHasAllAny(context, false, name) {} + FunctionArrayHasAny(const Context & context_) : FunctionArrayHasAllAny(context_, false, name) {} }; void registerFunctionHasAny(FunctionFactory & factory) diff --git a/dbms/src/Functions/coalesce.cpp b/dbms/src/Functions/coalesce.cpp index 7ef7eeadfdb..947ecd0e199 100644 --- a/dbms/src/Functions/coalesce.cpp +++ b/dbms/src/Functions/coalesce.cpp @@ -26,7 +26,7 @@ public: return std::make_shared(context); } - FunctionCoalesce(const Context & context) : context(context) {} + FunctionCoalesce(const Context & context_) : context(context_) {} std::string getName() const override { diff --git a/dbms/src/Functions/concat.cpp b/dbms/src/Functions/concat.cpp index b613f91f12c..c4fa2044321 100644 --- a/dbms/src/Functions/concat.cpp +++ b/dbms/src/Functions/concat.cpp @@ -32,7 +32,7 @@ class ConcatImpl : public IFunction { public: static constexpr auto name = Name::name; - ConcatImpl(const Context & context) : context(context) {} + ConcatImpl(const Context & context_) : context(context_) {} static FunctionPtr create(const Context & context) { return std::make_shared(context); } String getName() const override { return name; } @@ -190,7 +190,7 @@ public: static constexpr auto name = "concat"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderConcat(const Context & context) : context(context) {} + FunctionBuilderConcat(const Context & context_) : context(context_) {} String getName() const override { return name; } 
size_t getNumberOfArguments() const override { return 0; } diff --git a/dbms/src/Functions/currentDatabase.cpp b/dbms/src/Functions/currentDatabase.cpp index 6c5137fe2ee..b1276d8546f 100644 --- a/dbms/src/Functions/currentDatabase.cpp +++ b/dbms/src/Functions/currentDatabase.cpp @@ -18,7 +18,7 @@ public: return std::make_shared(context.getCurrentDatabase()); } - explicit FunctionCurrentDatabase(const String & db_name) : db_name{db_name} + explicit FunctionCurrentDatabase(const String & db_name_) : db_name{db_name_} { } diff --git a/dbms/src/Functions/evalMLMethod.cpp b/dbms/src/Functions/evalMLMethod.cpp index e49bd917d1d..9f7445ba863 100644 --- a/dbms/src/Functions/evalMLMethod.cpp +++ b/dbms/src/Functions/evalMLMethod.cpp @@ -34,7 +34,7 @@ public: { return std::make_shared(context); } - FunctionEvalMLMethod(const Context & context) : context(context) + FunctionEvalMLMethod(const Context & context_) : context(context_) {} String getName() const override diff --git a/dbms/src/Functions/formatDateTime.cpp b/dbms/src/Functions/formatDateTime.cpp index ffbf391db6f..8cecdb69717 100644 --- a/dbms/src/Functions/formatDateTime.cpp +++ b/dbms/src/Functions/formatDateTime.cpp @@ -79,7 +79,7 @@ private: Func func; size_t shift; - Action(Func func, size_t shift = 0) : func(func), shift(shift) {} + Action(Func func_, size_t shift_ = 0) : func(func_), shift(shift_) {} void perform(char *& target, Time source, const DateLUTImpl & timezone) { diff --git a/dbms/src/Functions/if.cpp b/dbms/src/Functions/if.cpp index 6676ad87d75..a406b63be8b 100644 --- a/dbms/src/Functions/if.cpp +++ b/dbms/src/Functions/if.cpp @@ -170,7 +170,7 @@ class FunctionIf : public FunctionIfBase public: static constexpr auto name = "if"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionIf(const Context & context) : context(context) {} + FunctionIf(const Context & context_) : context(context_) {} private: template diff --git a/dbms/src/Functions/ifNull.cpp 
b/dbms/src/Functions/ifNull.cpp index bf517293409..2c552d86ffe 100644 --- a/dbms/src/Functions/ifNull.cpp +++ b/dbms/src/Functions/ifNull.cpp @@ -19,7 +19,7 @@ class FunctionIfNull : public IFunction public: static constexpr auto name = "ifNull"; - FunctionIfNull(const Context & context) : context(context) {} + FunctionIfNull(const Context & context_) : context(context_) {} static FunctionPtr create(const Context & context) { diff --git a/dbms/src/Functions/multiIf.cpp b/dbms/src/Functions/multiIf.cpp index 7ee5f1c5e67..f56889f7a01 100644 --- a/dbms/src/Functions/multiIf.cpp +++ b/dbms/src/Functions/multiIf.cpp @@ -32,7 +32,7 @@ class FunctionMultiIf final : public FunctionIfBase public: static constexpr auto name = "multiIf"; static FunctionPtr create(const Context & context) { return std::make_shared(context); } - FunctionMultiIf(const Context & context) : context(context) {} + FunctionMultiIf(const Context & context_) : context(context_) {} public: String getName() const override { return name; } diff --git a/dbms/src/Functions/nullIf.cpp b/dbms/src/Functions/nullIf.cpp index 0039dbad2b1..59ce4b54c82 100644 --- a/dbms/src/Functions/nullIf.cpp +++ b/dbms/src/Functions/nullIf.cpp @@ -25,7 +25,7 @@ public: return std::make_shared(context); } - FunctionNullIf(const Context & context) : context(context) {} + FunctionNullIf(const Context & context_) : context(context_) {} std::string getName() const override { diff --git a/dbms/src/Functions/reverse.cpp b/dbms/src/Functions/reverse.cpp index d8e6c6de194..269a3e3f7c2 100644 --- a/dbms/src/Functions/reverse.cpp +++ b/dbms/src/Functions/reverse.cpp @@ -118,7 +118,7 @@ public: static constexpr auto name = "reverse"; static FunctionBuilderPtr create(const Context & context) { return std::make_shared(context); } - FunctionBuilderReverse(const Context & context) : context(context) {} + FunctionBuilderReverse(const Context & context_) : context(context_) {} String getName() const override { return name; } size_t 
getNumberOfArguments() const override { return 1; } diff --git a/dbms/src/IO/LimitReadBuffer.cpp b/dbms/src/IO/LimitReadBuffer.cpp index a6e4e7a7b16..f36facfdd99 100644 --- a/dbms/src/IO/LimitReadBuffer.cpp +++ b/dbms/src/IO/LimitReadBuffer.cpp @@ -36,8 +36,8 @@ bool LimitReadBuffer::nextImpl() } -LimitReadBuffer::LimitReadBuffer(ReadBuffer & in, UInt64 limit, bool throw_exception, std::string exception_message) - : ReadBuffer(in.position(), 0), in(in), limit(limit), throw_exception(throw_exception), exception_message(std::move(exception_message)) +LimitReadBuffer::LimitReadBuffer(ReadBuffer & in_, UInt64 limit_, bool throw_exception_, std::string exception_message_) + : ReadBuffer(in_.position(), 0), in(in_), limit(limit_), throw_exception(throw_exception_), exception_message(std::move(exception_message_)) { size_t remaining_bytes_in_buffer = in.buffer().end() - in.position(); if (remaining_bytes_in_buffer > limit) diff --git a/dbms/src/IO/LimitReadBuffer.h b/dbms/src/IO/LimitReadBuffer.h index d16579be7c5..545de6fd4a2 100644 --- a/dbms/src/IO/LimitReadBuffer.h +++ b/dbms/src/IO/LimitReadBuffer.h @@ -21,7 +21,7 @@ private: bool nextImpl() override; public: - LimitReadBuffer(ReadBuffer & in, UInt64 limit, bool throw_exception, std::string exception_message = {}); + LimitReadBuffer(ReadBuffer & in_, UInt64 limit_, bool throw_exception_, std::string exception_message_ = {}); ~LimitReadBuffer() override; }; diff --git a/dbms/src/IO/MMapReadBufferFromFile.cpp b/dbms/src/IO/MMapReadBufferFromFile.cpp index 74c07c40782..45558b540e5 100644 --- a/dbms/src/IO/MMapReadBufferFromFile.cpp +++ b/dbms/src/IO/MMapReadBufferFromFile.cpp @@ -29,14 +29,15 @@ void MMapReadBufferFromFile::open(const std::string & file_name) fd = ::open(file_name.c_str(), O_RDONLY); if (-1 == fd) - throwFromErrno("Cannot open file " + file_name, errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrnoWithPath("Cannot open file " + file_name, file_name, + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); } -MMapReadBufferFromFile::MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length) +MMapReadBufferFromFile::MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length_) { open(file_name); - init(fd, offset, length); + init(fd, offset, length_); } diff --git a/dbms/src/IO/MMapReadBufferFromFile.h b/dbms/src/IO/MMapReadBufferFromFile.h index c1762bd54f5..6790f817b93 100644 --- a/dbms/src/IO/MMapReadBufferFromFile.h +++ b/dbms/src/IO/MMapReadBufferFromFile.h @@ -16,7 +16,7 @@ namespace DB class MMapReadBufferFromFile : public MMapReadBufferFromFileDescriptor { public: - MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length); + MMapReadBufferFromFile(const std::string & file_name, size_t offset, size_t length_); /// Map till end of file. 
MMapReadBufferFromFile(const std::string & file_name, size_t offset); diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp b/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp index 4643b9b626c..4852f9e57e9 100644 --- a/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/dbms/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -57,17 +57,17 @@ void MMapReadBufferFromFileDescriptor::init(int fd_, size_t offset) } -MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd, size_t offset, size_t length) +MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd_, size_t offset_, size_t length_) : MMapReadBufferFromFileDescriptor() { - init(fd, offset, length); + init(fd_, offset_, length_); } -MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd, size_t offset) +MMapReadBufferFromFileDescriptor::MMapReadBufferFromFileDescriptor(int fd_, size_t offset_) : MMapReadBufferFromFileDescriptor() { - init(fd, offset); + init(fd_, offset_); } diff --git a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h b/dbms/src/IO/MMapReadBufferFromFileDescriptor.h index f31aac0bbf9..aaef8c3212a 100644 --- a/dbms/src/IO/MMapReadBufferFromFileDescriptor.h +++ b/dbms/src/IO/MMapReadBufferFromFileDescriptor.h @@ -20,10 +20,10 @@ protected: void init(int fd_, size_t offset); public: - MMapReadBufferFromFileDescriptor(int fd, size_t offset, size_t length); + MMapReadBufferFromFileDescriptor(int fd_, size_t offset_, size_t length_); /// Map till end of file. 
- MMapReadBufferFromFileDescriptor(int fd, size_t offset); + MMapReadBufferFromFileDescriptor(int fd_, size_t offset_); ~MMapReadBufferFromFileDescriptor() override; diff --git a/dbms/src/IO/ReadBufferAIO.cpp b/dbms/src/IO/ReadBufferAIO.cpp index 50845330587..c3dd04b0027 100644 --- a/dbms/src/IO/ReadBufferAIO.cpp +++ b/dbms/src/IO/ReadBufferAIO.cpp @@ -54,7 +54,7 @@ ReadBufferAIO::ReadBufferAIO(const std::string & filename_, size_t buffer_size_, if (fd == -1) { auto error_code = (errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE; - throwFromErrno("Cannot open file " + filename, error_code); + throwFromErrnoWithPath("Cannot open file " + filename, filename, error_code); } } diff --git a/dbms/src/IO/ReadBufferFromFile.cpp b/dbms/src/IO/ReadBufferFromFile.cpp index b94fce8e033..b9cd7caf155 100644 --- a/dbms/src/IO/ReadBufferFromFile.cpp +++ b/dbms/src/IO/ReadBufferFromFile.cpp @@ -41,12 +41,13 @@ ReadBufferFromFile::ReadBufferFromFile( fd = ::open(file_name.c_str(), flags == -1 ? O_RDONLY : flags); if (-1 == fd) - throwFromErrno("Cannot open file " + file_name, errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrnoWithPath("Cannot open file " + file_name, file_name, + errno == ENOENT ? 
ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); #ifdef __APPLE__ if (o_direct) { if (fcntl(fd, F_NOCACHE, 1) == -1) - throwFromErrno("Cannot set F_NOCACHE on file " + file_name, ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrno("Cannot set F_NOCACHE on file " + file_name, file_name, ErrorCodes::CANNOT_OPEN_FILE); } #endif } diff --git a/dbms/src/IO/ReadBufferFromFileDescriptor.cpp b/dbms/src/IO/ReadBufferFromFileDescriptor.cpp index 70cc84567f3..db79d078c65 100644 --- a/dbms/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/dbms/src/IO/ReadBufferFromFileDescriptor.cpp @@ -61,7 +61,8 @@ bool ReadBufferFromFileDescriptor::nextImpl() if (-1 == res && errno != EINTR) { ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed); - throwFromErrno("Cannot read from file " + getFileName(), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); + throwFromErrnoWithPath("Cannot read from file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR); } if (res > 0) @@ -124,7 +125,8 @@ off_t ReadBufferFromFileDescriptor::doSeek(off_t offset, int whence) pos = working_buffer.end(); off_t res = ::lseek(fd, new_pos, SEEK_SET); if (-1 == res) - throwFromErrno("Cannot seek through file " + getFileName(), ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); pos_in_file = new_pos; watch.stop(); diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.h b/dbms/src/IO/ReadWriteBufferFromHTTP.h index 62f2b0351f6..d36633220b4 100644 --- a/dbms/src/IO/ReadWriteBufferFromHTTP.h +++ b/dbms/src/IO/ReadWriteBufferFromHTTP.h @@ -43,14 +43,14 @@ namespace detail using OutStreamCallback = std::function; explicit ReadWriteBufferFromHTTPBase(SessionPtr session_, - Poco::URI uri, - const std::string & method = {}, + Poco::URI uri_, + const std::string & method_ = {}, OutStreamCallback out_stream_callback = {}, const Poco::Net::HTTPBasicCredentials & 
credentials = {}, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE) : ReadBuffer(nullptr, 0) - , uri {uri} - , method {!method.empty() ? method : out_stream_callback ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET} + , uri {uri_} + , method {!method_.empty() ? method_ : out_stream_callback ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET} , session {session_} { // With empty path poco will send "POST HTTP/1.1" its bug. diff --git a/dbms/src/IO/WriteBufferAIO.cpp b/dbms/src/IO/WriteBufferAIO.cpp index a558768c64a..2989a6feb40 100644 --- a/dbms/src/IO/WriteBufferAIO.cpp +++ b/dbms/src/IO/WriteBufferAIO.cpp @@ -62,7 +62,7 @@ WriteBufferAIO::WriteBufferAIO(const std::string & filename_, size_t buffer_size if (fd == -1) { auto error_code = (errno == ENOENT) ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE; - throwFromErrno("Cannot open file " + filename, error_code); + throwFromErrnoWithPath("Cannot open file " + filename, filename, error_code); } } @@ -96,7 +96,7 @@ void WriteBufferAIO::sync() /// Ask OS to flush data to disk. int res = ::fsync(fd); if (res == -1) - throwFromErrno("Cannot fsync " + getFileName(), ErrorCodes::CANNOT_FSYNC); + throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC); } void WriteBufferAIO::nextImpl() @@ -173,7 +173,7 @@ void WriteBufferAIO::doTruncate(off_t length) int res = ::ftruncate(fd, length); if (res == -1) - throwFromErrno("Cannot truncate file " + filename, ErrorCodes::CANNOT_TRUNCATE_FILE); + throwFromErrnoWithPath("Cannot truncate file " + filename, filename, ErrorCodes::CANNOT_TRUNCATE_FILE); } void WriteBufferAIO::flush() @@ -427,7 +427,7 @@ void WriteBufferAIO::finalize() /// Truncate the file to remove unnecessary zeros from it. 
int res = ::ftruncate(fd, max_pos_in_file); if (res == -1) - throwFromErrno("Cannot truncate file " + filename, ErrorCodes::CANNOT_TRUNCATE_FILE); + throwFromErrnoWithPath("Cannot truncate file " + filename, filename, ErrorCodes::CANNOT_TRUNCATE_FILE); } } diff --git a/dbms/src/IO/WriteBufferFromFile.cpp b/dbms/src/IO/WriteBufferFromFile.cpp index 3082f674fff..5e6fd7d6fe1 100644 --- a/dbms/src/IO/WriteBufferFromFile.cpp +++ b/dbms/src/IO/WriteBufferFromFile.cpp @@ -44,13 +44,14 @@ WriteBufferFromFile::WriteBufferFromFile( fd = ::open(file_name.c_str(), flags == -1 ? O_WRONLY | O_TRUNC | O_CREAT : flags, mode); if (-1 == fd) - throwFromErrno("Cannot open file " + file_name, errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrnoWithPath("Cannot open file " + file_name, file_name, + errno == ENOENT ? ErrorCodes::FILE_DOESNT_EXIST : ErrorCodes::CANNOT_OPEN_FILE); #ifdef __APPLE__ if (o_direct) { if (fcntl(fd, F_NOCACHE, 1) == -1) - throwFromErrno("Cannot set F_NOCACHE on file " + file_name, ErrorCodes::CANNOT_OPEN_FILE); + throwFromErrno("Cannot set F_NOCACHE on file " + file_name, file_name, ErrorCodes::CANNOT_OPEN_FILE); } #endif } diff --git a/dbms/src/IO/WriteBufferFromFileDescriptor.cpp b/dbms/src/IO/WriteBufferFromFileDescriptor.cpp index 0ca39b47ada..bfa1e9582d3 100644 --- a/dbms/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/dbms/src/IO/WriteBufferFromFileDescriptor.cpp @@ -56,7 +56,8 @@ void WriteBufferFromFileDescriptor::nextImpl() if ((-1 == res || 0 == res) && errno != EINTR) { ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteFailed); - throwFromErrno("Cannot write to file " + getFileName(), ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); + throwFromErrnoWithPath("Cannot write to file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_WRITE_TO_FILE_DESCRIPTOR); } if (res > 0) @@ -111,7 +112,7 @@ void WriteBufferFromFileDescriptor::sync() /// Request OS to sync data with storage medium. 
int res = fsync(fd); if (-1 == res) - throwFromErrno("Cannot fsync " + getFileName(), ErrorCodes::CANNOT_FSYNC); + throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC); } @@ -119,7 +120,8 @@ off_t WriteBufferFromFileDescriptor::doSeek(off_t offset, int whence) { off_t res = lseek(fd, offset, whence); if (-1 == res) - throwFromErrno("Cannot seek through file " + getFileName(), ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throwFromErrnoWithPath("Cannot seek through file " + getFileName(), getFileName(), + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); return res; } @@ -128,7 +130,7 @@ void WriteBufferFromFileDescriptor::doTruncate(off_t length) { int res = ftruncate(fd, length); if (-1 == res) - throwFromErrno("Cannot truncate file " + getFileName(), ErrorCodes::CANNOT_TRUNCATE_FILE); + throwFromErrnoWithPath("Cannot truncate file " + getFileName(), getFileName(), ErrorCodes::CANNOT_TRUNCATE_FILE); } } diff --git a/dbms/src/IO/WriteBufferFromTemporaryFile.cpp b/dbms/src/IO/WriteBufferFromTemporaryFile.cpp index e1250c58097..c5a6bc04350 100644 --- a/dbms/src/IO/WriteBufferFromTemporaryFile.cpp +++ b/dbms/src/IO/WriteBufferFromTemporaryFile.cpp @@ -39,7 +39,8 @@ public: off_t res = lseek(fd, 0, SEEK_SET); if (-1 == res) - throwFromErrno("Cannot reread temporary file " + file_name, ErrorCodes::CANNOT_SEEK_THROUGH_FILE); + throwFromErrnoWithPath("Cannot reread temporary file " + file_name, file_name, + ErrorCodes::CANNOT_SEEK_THROUGH_FILE); return std::make_shared(fd, file_name, std::move(origin->tmp_file)); } diff --git a/dbms/src/IO/WriteBufferValidUTF8.cpp b/dbms/src/IO/WriteBufferValidUTF8.cpp index 01e011982cd..edff9e5bcf4 100644 --- a/dbms/src/IO/WriteBufferValidUTF8.cpp +++ b/dbms/src/IO/WriteBufferValidUTF8.cpp @@ -32,9 +32,9 @@ extern const UInt8 length_of_utf8_sequence[256] = WriteBufferValidUTF8::WriteBufferValidUTF8( - WriteBuffer & output_buffer, bool group_replacements, const char * replacement, size_t size) - : 
BufferWithOwnMemory(std::max(static_cast(32), size)), output_buffer(output_buffer), - group_replacements(group_replacements), replacement(replacement) + WriteBuffer & output_buffer_, bool group_replacements_, const char * replacement_, size_t size) + : BufferWithOwnMemory(std::max(static_cast(32), size)), output_buffer(output_buffer_), + group_replacements(group_replacements_), replacement(replacement_) { } diff --git a/dbms/src/IO/WriteBufferValidUTF8.h b/dbms/src/IO/WriteBufferValidUTF8.h index 49243a1844f..31151eefbfb 100644 --- a/dbms/src/IO/WriteBufferValidUTF8.h +++ b/dbms/src/IO/WriteBufferValidUTF8.h @@ -30,9 +30,9 @@ public: static const size_t DEFAULT_SIZE; WriteBufferValidUTF8( - WriteBuffer & output_buffer, - bool group_replacements = true, - const char * replacement = "\xEF\xBF\xBD", + WriteBuffer & output_buffer_, + bool group_replacements_ = true, + const char * replacement_ = "\xEF\xBF\xBD", size_t size = DEFAULT_SIZE); virtual ~WriteBufferValidUTF8() override diff --git a/dbms/src/Interpreters/ActionsVisitor.cpp b/dbms/src/Interpreters/ActionsVisitor.cpp index d0237f7cb88..523343a288e 100644 --- a/dbms/src/Interpreters/ActionsVisitor.cpp +++ b/dbms/src/Interpreters/ActionsVisitor.cpp @@ -463,7 +463,7 @@ void ActionsVisitor::visit(const ASTPtr & ast) for (size_t j = 0; j < lambda_arg_asts.size(); ++j) { - auto opt_arg_name = getIdentifierName(lambda_arg_asts[j]); + auto opt_arg_name = tryGetIdentifierName(lambda_arg_asts[j]); if (!opt_arg_name) throw Exception("lambda argument declarations must be identifiers", ErrorCodes::TYPE_MISMATCH); diff --git a/dbms/src/Interpreters/Aggregator.cpp b/dbms/src/Interpreters/Aggregator.cpp index b3c0aa87f8a..23a06ea58d6 100644 --- a/dbms/src/Interpreters/Aggregator.cpp +++ b/dbms/src/Interpreters/Aggregator.cpp @@ -1809,7 +1809,7 @@ private: std::condition_variable condvar; ThreadPool pool; - explicit ParallelMergeData(size_t threads) : pool(threads) {} + explicit ParallelMergeData(size_t threads_) : 
pool(threads_) {} }; std::unique_ptr parallel_merge_data; diff --git a/dbms/src/Interpreters/CatBoostModel.cpp b/dbms/src/Interpreters/CatBoostModel.cpp index 3e6e66b5c3f..b2e2a4c3d73 100644 --- a/dbms/src/Interpreters/CatBoostModel.cpp +++ b/dbms/src/Interpreters/CatBoostModel.cpp @@ -76,7 +76,7 @@ private: CatBoostWrapperAPI::ModelCalcerHandle * handle; const CatBoostWrapperAPI * api; public: - explicit CatBoostModelHolder(const CatBoostWrapperAPI * api) : api(api) { handle = api->ModelCalcerCreate(); } + explicit CatBoostModelHolder(const CatBoostWrapperAPI * api_) : api(api_) { handle = api->ModelCalcerCreate(); } ~CatBoostModelHolder() { api->ModelCalcerDelete(handle); } CatBoostWrapperAPI::ModelCalcerHandle * get() { return handle; } @@ -86,7 +86,7 @@ public: class CatBoostModelImpl : public ICatBoostModel { public: - CatBoostModelImpl(const CatBoostWrapperAPI * api, const std::string & model_path) : api(api) + CatBoostModelImpl(const CatBoostWrapperAPI * api_, const std::string & model_path) : api(api_) { auto handle_ = std::make_unique(api); if (!handle_) @@ -502,8 +502,8 @@ std::shared_ptr getCatBoostWrapperHolder(const std::string & CatBoostModel::CatBoostModel(std::string name_, std::string model_path_, std::string lib_path_, - const ExternalLoadableLifetime & lifetime) - : name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime) + const ExternalLoadableLifetime & lifetime_) + : name(std::move(name_)), model_path(std::move(model_path_)), lib_path(std::move(lib_path_)), lifetime(lifetime_) { api_provider = getCatBoostWrapperHolder(lib_path); api = &api_provider->getAPI(); diff --git a/dbms/src/Interpreters/ColumnNamesContext.h b/dbms/src/Interpreters/ColumnNamesContext.h index 72f5f8f8684..c30102cf8d7 100644 --- a/dbms/src/Interpreters/ColumnNamesContext.h +++ b/dbms/src/Interpreters/ColumnNamesContext.h @@ -39,7 +39,7 @@ struct ColumnNamesContext std::optional name() const { if (expr) - return 
getIdentifierName(expr->database_and_table_name); + return tryGetIdentifierName(expr->database_and_table_name); return {}; } diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp index 992593d852c..83f3763bb11 100644 --- a/dbms/src/Interpreters/Context.cpp +++ b/dbms/src/Interpreters/Context.cpp @@ -1189,13 +1189,13 @@ void Context::setCurrentQueryId(const String & query_id) random.words.b = thread_local_rng(); /// Use protected constructor. - struct UUID : Poco::UUID + struct qUUID : Poco::UUID { - UUID(const char * bytes, Poco::UUID::Version version) + qUUID(const char * bytes, Poco::UUID::Version version) : Poco::UUID(bytes, version) {} }; - query_id_to_set = UUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); + query_id_to_set = qUUID(random.bytes, Poco::UUID::UUID_RANDOM).toString(); } client_info.current_query_id = query_id_to_set; diff --git a/dbms/src/Interpreters/DDLWorker.cpp b/dbms/src/Interpreters/DDLWorker.cpp index b9ab41e38dc..afa4fca79f8 100644 --- a/dbms/src/Interpreters/DDLWorker.cpp +++ b/dbms/src/Interpreters/DDLWorker.cpp @@ -1058,8 +1058,8 @@ class DDLQueryStatusInputStream : public IBlockInputStream { public: - DDLQueryStatusInputStream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context) - : node_path(zk_node_path), context(context), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputStream")) + DDLQueryStatusInputStream(const String & zk_node_path, const DDLLogEntry & entry, const Context & context_) + : node_path(zk_node_path), context(context_), watch(CLOCK_MONOTONIC_COARSE), log(&Logger::get("DDLQueryStatusInputStream")) { sample = Block{ {std::make_shared(), "host"}, diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h index 644d10da1be..0a370593e90 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/dbms/src/Interpreters/ExpressionAnalyzer.h @@ -88,12 +88,12 @@ private: const SizeLimits 
size_limits_for_join; const String join_default_strictness; - ExtractedSettings(const Settings & settings) - : use_index_for_in_with_subqueries(settings.use_index_for_in_with_subqueries), - join_use_nulls(settings.join_use_nulls), - size_limits_for_set(settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode), - size_limits_for_join(settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode), - join_default_strictness(settings.join_default_strictness.toString()) + ExtractedSettings(const Settings & settings_) + : use_index_for_in_with_subqueries(settings_.use_index_for_in_with_subqueries), + join_use_nulls(settings_.join_use_nulls), + size_limits_for_set(settings_.max_rows_in_set, settings_.max_bytes_in_set, settings_.set_overflow_mode), + size_limits_for_join(settings_.max_rows_in_join, settings_.max_bytes_in_join, settings_.join_overflow_mode), + join_default_strictness(settings_.join_default_strictness.toString()) {} }; diff --git a/dbms/src/Interpreters/ExternalDictionaries.cpp b/dbms/src/Interpreters/ExternalDictionaries.cpp index 9f20b492f51..e1cbd377978 100644 --- a/dbms/src/Interpreters/ExternalDictionaries.cpp +++ b/dbms/src/Interpreters/ExternalDictionaries.cpp @@ -9,11 +9,11 @@ namespace DB ExternalDictionaries::ExternalDictionaries( std::unique_ptr config_repository, const Poco::Util::AbstractConfiguration & config, - Context & context) + Context & context_) : ExternalLoader(config, "external dictionary", &Logger::get("ExternalDictionaries")), - context(context) + context(context_) { addConfigRepository(std::move(config_repository), {"dictionary", "name", "dictionaries_config"}); enableAsyncLoading(true); diff --git a/dbms/src/Interpreters/ExternalDictionaries.h b/dbms/src/Interpreters/ExternalDictionaries.h index e1ef53bbd3f..c071349cc97 100644 --- a/dbms/src/Interpreters/ExternalDictionaries.h +++ b/dbms/src/Interpreters/ExternalDictionaries.h @@ -21,7 +21,7 @@ public: ExternalDictionaries( 
std::unique_ptr config_repository, const Poco::Util::AbstractConfiguration & config, - Context & context); + Context & context_); DictPtr getDictionary(const std::string & name) const { diff --git a/dbms/src/Interpreters/ExternalLoader.h b/dbms/src/Interpreters/ExternalLoader.h index 4c94b8d69cd..8a52d991759 100644 --- a/dbms/src/Interpreters/ExternalLoader.h +++ b/dbms/src/Interpreters/ExternalLoader.h @@ -19,8 +19,8 @@ struct ExternalLoaderUpdateSettings UInt64 backoff_max_sec = 10 * 60; ExternalLoaderUpdateSettings() = default; - ExternalLoaderUpdateSettings(UInt64 check_period_sec, UInt64 backoff_initial_sec, UInt64 backoff_max_sec) - : check_period_sec(check_period_sec), backoff_initial_sec(backoff_initial_sec), backoff_max_sec(backoff_max_sec) {} + ExternalLoaderUpdateSettings(UInt64 check_period_sec_, UInt64 backoff_initial_sec_, UInt64 backoff_max_sec_) + : check_period_sec(check_period_sec_), backoff_initial_sec(backoff_initial_sec_), backoff_max_sec(backoff_max_sec_) {} }; diff --git a/dbms/src/Interpreters/ExternalModels.cpp b/dbms/src/Interpreters/ExternalModels.cpp index cb3e65d6150..f3c1310410b 100644 --- a/dbms/src/Interpreters/ExternalModels.cpp +++ b/dbms/src/Interpreters/ExternalModels.cpp @@ -12,11 +12,11 @@ namespace ErrorCodes ExternalModels::ExternalModels( std::unique_ptr config_repository, - Context & context) - : ExternalLoader(context.getConfigRef(), + Context & context_) + : ExternalLoader(context_.getConfigRef(), "external model", &Logger::get("ExternalModels")), - context(context) + context(context_) { addConfigRepository(std::move(config_repository), {"model", "name", "models_config"}); enablePeriodicUpdates(true); diff --git a/dbms/src/Interpreters/ExternalModels.h b/dbms/src/Interpreters/ExternalModels.h index ff5b9dffcfc..2c4706b0664 100644 --- a/dbms/src/Interpreters/ExternalModels.h +++ b/dbms/src/Interpreters/ExternalModels.h @@ -20,7 +20,7 @@ public: /// Models will be loaded immediately and then will be updated in separate 
thread, each 'reload_period' seconds. ExternalModels( std::unique_ptr config_repository, - Context & context); + Context & context_); ModelPtr getModel(const std::string & name) const { diff --git a/dbms/src/Interpreters/GlobalSubqueriesVisitor.h b/dbms/src/Interpreters/GlobalSubqueriesVisitor.h index 229fa00a59f..1622c27f62f 100644 --- a/dbms/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/dbms/src/Interpreters/GlobalSubqueriesVisitor.h @@ -75,7 +75,7 @@ public: if (is_table) { /// If this is already an external table, you do not need to add anything. Just remember its presence. - if (external_tables.end() != external_tables.find(*getIdentifierName(subquery_or_table_name))) + if (external_tables.end() != external_tables.find(getIdentifierName(subquery_or_table_name))) return; } diff --git a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h b/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h index c1d0afe7873..00984832ad6 100644 --- a/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h +++ b/dbms/src/Interpreters/InJoinSubqueriesPreprocessor.h @@ -46,8 +46,8 @@ public: virtual ~CheckShardsAndTables() {} }; - InJoinSubqueriesPreprocessor(const Context & context, CheckShardsAndTables::Ptr _checker = std::make_unique()) - : context(context) + InJoinSubqueriesPreprocessor(const Context & context_, CheckShardsAndTables::Ptr _checker = std::make_unique()) + : context(context_) , checker(std::move(_checker)) {} diff --git a/dbms/src/Interpreters/InterpreterCreateQuery.cpp b/dbms/src/Interpreters/InterpreterCreateQuery.cpp index 4b9271df0e8..0467e91c6d1 100644 --- a/dbms/src/Interpreters/InterpreterCreateQuery.cpp +++ b/dbms/src/Interpreters/InterpreterCreateQuery.cpp @@ -542,7 +542,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) columns = setColumns(create, as_select_sample, as_storage); /// Check low cardinality types in creating table if it was not allowed in setting - if (!create.attach && 
!context.getSettingsRef().allow_suspicious_low_cardinality_types) + if (!create.attach && !context.getSettingsRef().allow_suspicious_low_cardinality_types && !create.is_materialized_view) { for (const auto & name_and_type_pair : columns.getAllPhysical()) { diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index 9682d0e29e4..5fe473f5d2e 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -156,9 +156,9 @@ String generateFilterActions(ExpressionActionsPtr & actions, const StoragePtr & InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, - const SelectQueryOptions & options, - const Names & required_result_column_names) - : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options, required_result_column_names) + const SelectQueryOptions & options_, + const Names & required_result_column_names_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, options_, required_result_column_names_) { } @@ -166,16 +166,16 @@ InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, const BlockInputStreamPtr & input_, - const SelectQueryOptions & options) - : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options.copy().noSubquery()) + const SelectQueryOptions & options_) + : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, options_.copy().noSubquery()) {} InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, const StoragePtr & storage_, - const SelectQueryOptions & options) - : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options.copy().noSubquery()) + const SelectQueryOptions & options_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, options_.copy().noSubquery()) {} InterpreterSelectQuery::~InterpreterSelectQuery() = 
default; diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/dbms/src/Interpreters/InterpreterSelectQuery.h index f6f3c0baf19..0e2cfcd4c7b 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.h +++ b/dbms/src/Interpreters/InterpreterSelectQuery.h @@ -46,7 +46,7 @@ public: const ASTPtr & query_ptr_, const Context & context_, const SelectQueryOptions &, - const Names & required_result_column_names = Names{}); + const Names & required_result_column_names_ = Names{}); /// Read data not from the table specified in the query, but from the prepared source `input`. InterpreterSelectQuery( diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/src/Interpreters/Join.cpp index 9f1a69fba70..0ee93122275 100644 --- a/dbms/src/Interpreters/Join.cpp +++ b/dbms/src/Interpreters/Join.cpp @@ -81,14 +81,14 @@ static ColumnWithTypeAndName correctNullability(ColumnWithTypeAndName && column, } -Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits, +Join::Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits_, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_) : kind(kind_), strictness(strictness_), key_names_right(key_names_right_), use_nulls(use_nulls_), any_take_last_row(any_take_last_row_), log(&Logger::get("Join")), - limits(limits) + limits(limits_) { } diff --git a/dbms/src/Interpreters/Join.h b/dbms/src/Interpreters/Join.h index f57755fad91..fcff80aad62 100644 --- a/dbms/src/Interpreters/Join.h +++ b/dbms/src/Interpreters/Join.h @@ -121,7 +121,7 @@ using MappedAsof = WithFlags; class Join { public: - Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits, + Join(const Names & key_names_right_, bool use_nulls_, const SizeLimits & limits_, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_, bool any_take_last_row_ = false); bool empty() { return type == Type::EMPTY; } diff --git a/dbms/src/Interpreters/MutationsInterpreter.h 
b/dbms/src/Interpreters/MutationsInterpreter.h index 268e5f4b081..3fa8961e8f7 100644 --- a/dbms/src/Interpreters/MutationsInterpreter.h +++ b/dbms/src/Interpreters/MutationsInterpreter.h @@ -65,7 +65,7 @@ private: struct Stage { - Stage(const Context & context) : expressions_chain(context) {} + Stage(const Context & context_) : expressions_chain(context_) {} ASTs filters; std::unordered_map column_to_updated; diff --git a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h b/dbms/src/Interpreters/PredicateExpressionsOptimizer.h index f9df113abf2..4fa5cb20e2c 100644 --- a/dbms/src/Interpreters/PredicateExpressionsOptimizer.h +++ b/dbms/src/Interpreters/PredicateExpressionsOptimizer.h @@ -43,13 +43,13 @@ class PredicateExpressionsOptimizer const bool join_use_nulls; template - ExtractedSettings(const T & settings) - : max_ast_depth(settings.max_ast_depth), - max_expanded_ast_elements(settings.max_expanded_ast_elements), - count_distinct_implementation(settings.count_distinct_implementation), - enable_optimize_predicate_expression(settings.enable_optimize_predicate_expression), - enable_optimize_predicate_expression_to_final_subquery(settings.enable_optimize_predicate_expression_to_final_subquery), - join_use_nulls(settings.join_use_nulls) + ExtractedSettings(const T & settings_) + : max_ast_depth(settings_.max_ast_depth), + max_expanded_ast_elements(settings_.max_expanded_ast_elements), + count_distinct_implementation(settings_.count_distinct_implementation), + enable_optimize_predicate_expression(settings_.enable_optimize_predicate_expression), + enable_optimize_predicate_expression_to_final_subquery(settings_.enable_optimize_predicate_expression_to_final_subquery), + join_use_nulls(settings_.join_use_nulls) {} }; diff --git a/dbms/src/Interpreters/QueryNormalizer.cpp b/dbms/src/Interpreters/QueryNormalizer.cpp index cea801c7c2f..844c53c79ac 100644 --- a/dbms/src/Interpreters/QueryNormalizer.cpp +++ b/dbms/src/Interpreters/QueryNormalizer.cpp @@ -74,7 +74,7 @@ 
void QueryNormalizer::visit(ASTFunction & node, const ASTPtr &, Data & data) if (functionIsInOrGlobalInOperator(func_name)) { auto & ast = func_arguments->children.at(1); - if (auto opt_name = getIdentifierName(ast)) + if (auto opt_name = tryGetIdentifierName(ast)) if (!aliases.count(*opt_name)) setIdentifierSpecial(ast); } diff --git a/dbms/src/Interpreters/Set.h b/dbms/src/Interpreters/Set.h index 61314d3582e..aad10451de0 100644 --- a/dbms/src/Interpreters/Set.h +++ b/dbms/src/Interpreters/Set.h @@ -31,9 +31,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. - Set(const SizeLimits & limits, bool fill_set_elements) + Set(const SizeLimits & limits_, bool fill_set_elements_) : log(&Logger::get("Set")), - limits(limits), fill_set_elements(fill_set_elements) + limits(limits_), fill_set_elements(fill_set_elements_) { } diff --git a/dbms/src/Interpreters/SyntaxAnalyzer.cpp b/dbms/src/Interpreters/SyntaxAnalyzer.cpp index 48f40d17cb3..0cb833e9bc7 100644 --- a/dbms/src/Interpreters/SyntaxAnalyzer.cpp +++ b/dbms/src/Interpreters/SyntaxAnalyzer.cpp @@ -43,6 +43,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int INVALID_JOIN_ON_EXPRESSION; extern const int EMPTY_LIST_OF_COLUMNS_QUERIED; + extern const int NOT_IMPLEMENTED; } NameSet removeDuplicateColumns(NamesAndTypesList & columns) @@ -538,6 +539,23 @@ void replaceJoinedTable(const ASTTablesInSelectQueryElement* join) } } +void checkJoin(const ASTTablesInSelectQueryElement * join) +{ + if (!join->table_join) + return; + + const auto & table_join = join->table_join->as(); + + if (table_join.strictness == ASTTableJoin::Strictness::Any) + if (table_join.kind != ASTTableJoin::Kind::Left) + throw Exception("Old ANY INNER|RIGHT|FULL JOINs are disabled by default. Their logic would be changed. 
" + "Old logic is many-to-one for all kinds of ANY JOINs. It's equil to apply distinct for right table keys. " + "Default bahaviour is reserved for many-to-one LEFT JOIN, one-to-many RIGHT JOIN and one-to-one INNER JOIN. " + "It would be equal to apply distinct for keys to right, left and both tables respectively. " + "Set any_join_distinct_right_table_keys=1 to enable old bahaviour.", + ErrorCodes::NOT_IMPLEMENTED); +} + } @@ -578,6 +596,9 @@ SyntaxAnalyzerResultPtr SyntaxAnalyzer::analyze( if (const ASTTablesInSelectQueryElement * node = select_query->join()) { + if (!settings.any_join_distinct_right_table_keys) + checkJoin(node); + if (settings.enable_optimize_predicate_expression) replaceJoinedTable(node); diff --git a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index 34b59d4f993..7ae98d3e9c8 100644 --- a/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/dbms/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -253,7 +253,7 @@ void TranslateQualifiedNamesMatcher::extractJoinUsingColumns(const ASTPtr ast, D { const auto & keys = table_join.using_expression_list->as(); for (const auto & key : keys.children) - if (auto opt_column = getIdentifierName(key)) + if (auto opt_column = tryGetIdentifierName(key)) data.join_using_columns.insert(*opt_column); else if (key->as()) data.join_using_columns.insert(key->getColumnName()); diff --git a/dbms/src/Interpreters/executeQuery.cpp b/dbms/src/Interpreters/executeQuery.cpp index 5c7617aa8a1..36bdcc27634 100644 --- a/dbms/src/Interpreters/executeQuery.cpp +++ b/dbms/src/Interpreters/executeQuery.cpp @@ -565,7 +565,7 @@ void executeQuery( } String format_name = ast_query_with_output && (ast_query_with_output->format != nullptr) - ? *getIdentifierName(ast_query_with_output->format) + ? 
getIdentifierName(ast_query_with_output->format) : context.getDefaultFormat(); if (ast_query_with_output && ast_query_with_output->settings_ast) @@ -610,7 +610,7 @@ void executeQuery( } String format_name = ast_query_with_output && (ast_query_with_output->format != nullptr) - ? *getIdentifierName(ast_query_with_output->format) + ? getIdentifierName(ast_query_with_output->format) : context.getDefaultFormat(); if (ast_query_with_output && ast_query_with_output->settings_ast) diff --git a/dbms/src/Interpreters/loadMetadata.cpp b/dbms/src/Interpreters/loadMetadata.cpp index 84a3adffe07..00090d1d309 100644 --- a/dbms/src/Interpreters/loadMetadata.cpp +++ b/dbms/src/Interpreters/loadMetadata.cpp @@ -107,8 +107,8 @@ void loadMetadata(Context & context) databases.emplace(unescapeForFileName(it.name()), it.path().toString()); } - for (const auto & [name, path] : databases) - loadDatabase(context, name, path, has_force_restore_data_flag); + for (const auto & [name, db_path] : databases) + loadDatabase(context, name, db_path, has_force_restore_data_flag); if (has_force_restore_data_flag) { diff --git a/dbms/src/Parsers/ASTIdentifier.cpp b/dbms/src/Parsers/ASTIdentifier.cpp index fe806ce795a..e3948f99f5b 100644 --- a/dbms/src/Parsers/ASTIdentifier.cpp +++ b/dbms/src/Parsers/ASTIdentifier.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -8,6 +9,12 @@ namespace DB { +namespace ErrorCodes +{ + extern const int UNEXPECTED_AST_STRUCTURE; +} + + ASTPtr ASTIdentifier::clone() const { auto ret = std::make_shared(*this); @@ -92,22 +99,32 @@ ASTPtr createTableIdentifier(const String & database_name, const String & table_ return database_and_table; } -std::optional getIdentifierName(const IAST * const ast) +String getIdentifierName(const IAST * ast) { - if (ast) - if (const auto * node = ast->as()) - return node->name; + String res; + if (tryGetIdentifierNameInto(ast, res)) + return res; + throw Exception(ast ? 
queryToString(*ast) + " is not an identifier" : "AST node is nullptr", ErrorCodes::UNEXPECTED_AST_STRUCTURE); +} + +std::optional tryGetIdentifierName(const IAST * ast) +{ + String res; + if (tryGetIdentifierNameInto(ast, res)) + return res; return {}; } -bool getIdentifierName(const ASTPtr & ast, String & name) +bool tryGetIdentifierNameInto(const IAST * ast, String & name) { if (ast) + { if (const auto * node = ast->as()) { name = node->name; return true; } + } return false; } diff --git a/dbms/src/Parsers/ASTIdentifier.h b/dbms/src/Parsers/ASTIdentifier.h index 01f7766f1ef..3aaf7381138 100644 --- a/dbms/src/Parsers/ASTIdentifier.h +++ b/dbms/src/Parsers/ASTIdentifier.h @@ -70,9 +70,12 @@ private: ASTPtr createTableIdentifier(const String & database_name, const String & table_name); void setIdentifierSpecial(ASTPtr & ast); -std::optional getIdentifierName(const IAST * const ast); -inline std::optional getIdentifierName(const ASTPtr & ast) { return getIdentifierName(ast.get()); } -bool getIdentifierName(const ASTPtr & ast, String & name); +String getIdentifierName(const IAST * ast); +std::optional tryGetIdentifierName(const IAST * ast); +bool tryGetIdentifierNameInto(const IAST * ast, String & name); +inline String getIdentifierName(const ASTPtr & ast) { return getIdentifierName(ast.get()); } +inline std::optional tryGetIdentifierName(const ASTPtr & ast) { return tryGetIdentifierName(ast.get()); } +inline bool tryGetIdentifierNameInto(const ASTPtr & ast, String & name) { return tryGetIdentifierNameInto(ast.get(), name); } } diff --git a/dbms/src/Parsers/CommonParsers.h b/dbms/src/Parsers/CommonParsers.h index 44c8ab17fb7..60b35c33814 100644 --- a/dbms/src/Parsers/CommonParsers.h +++ b/dbms/src/Parsers/CommonParsers.h @@ -30,7 +30,7 @@ class ParserToken : public IParserBase private: TokenType token_type; public: - ParserToken(TokenType token_type) : token_type(token_type) {} + ParserToken(TokenType token_type_) : token_type(token_type_) {} protected: const char * 
getName() const override { return "token"; } diff --git a/dbms/src/Parsers/ExpressionElementParsers.cpp b/dbms/src/Parsers/ExpressionElementParsers.cpp index 33c95cdf1aa..eddbe2abb2f 100644 --- a/dbms/src/Parsers/ExpressionElementParsers.cpp +++ b/dbms/src/Parsers/ExpressionElementParsers.cpp @@ -177,7 +177,7 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex { if (!name.empty()) name += '.'; - parts.emplace_back(*getIdentifierName(child)); + parts.emplace_back(getIdentifierName(child)); name += parts.back(); } @@ -225,7 +225,7 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) * If you do not report that the first option is an error, then the argument will be interpreted as 2014 - 01 - 01 - some number, * and the query silently returns an unexpected result. */ - if (*getIdentifierName(identifier) == "toDate" + if (getIdentifierName(identifier) == "toDate" && contents_end - contents_begin == strlen("2014-01-01") && contents_begin[0] >= '2' && contents_begin[0] <= '3' && contents_begin[1] >= '0' && contents_begin[1] <= '9' @@ -267,7 +267,7 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } auto function_node = std::make_shared(); - getIdentifierName(identifier, function_node->name); + tryGetIdentifierNameInto(identifier, function_node->name); /// func(DISTINCT ...) is equivalent to funcDistinct(...) if (has_distinct_modifier) @@ -1158,7 +1158,7 @@ bool ParserAlias::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) * and in the query "SELECT x FRO FROM t", the word FRO was considered an alias. 
*/ - const String name = *getIdentifierName(node); + const String name = getIdentifierName(node); for (const char ** keyword = restricted_keywords; *keyword != nullptr; ++keyword) if (0 == strcasecmp(name.data(), *keyword)) @@ -1326,7 +1326,7 @@ bool ParserWithOptionalAlias::parseImpl(Pos & pos, ASTPtr & node, Expected & exp */ bool allow_alias_without_as_keyword_now = allow_alias_without_as_keyword; if (allow_alias_without_as_keyword) - if (auto opt_id = getIdentifierName(node)) + if (auto opt_id = tryGetIdentifierName(node)) if (0 == strcasecmp(opt_id->data(), "FROM")) allow_alias_without_as_keyword_now = false; @@ -1336,7 +1336,7 @@ bool ParserWithOptionalAlias::parseImpl(Pos & pos, ASTPtr & node, Expected & exp /// FIXME: try to prettify this cast using `as<>()` if (auto * ast_with_alias = dynamic_cast(node.get())) { - getIdentifierName(alias_node, ast_with_alias->alias); + tryGetIdentifierNameInto(alias_node, ast_with_alias->alias); } else { diff --git a/dbms/src/Parsers/Lexer.h b/dbms/src/Parsers/Lexer.h index 3f2712bae08..f705bfcf2d2 100644 --- a/dbms/src/Parsers/Lexer.h +++ b/dbms/src/Parsers/Lexer.h @@ -85,7 +85,7 @@ struct Token size_t size() const { return end - begin; } Token() = default; - Token(TokenType type, const char * begin, const char * end) : type(type), begin(begin), end(end) {} + Token(TokenType type_, const char * begin_, const char * end_) : type(type_), begin(begin_), end(end_) {} bool isSignificant() const { return type != TokenType::Whitespace && type != TokenType::Comment; } bool isError() const { return type > TokenType::EndOfStream; } @@ -96,8 +96,8 @@ struct Token class Lexer { public: - Lexer(const char * begin, const char * end, size_t max_query_size = 0) - : begin(begin), pos(begin), end(end), max_query_size(max_query_size) {} + Lexer(const char * begin_, const char * end_, size_t max_query_size_ = 0) + : begin(begin_), pos(begin_), end(end_), max_query_size(max_query_size_) {} Token nextToken(); private: diff --git 
a/dbms/src/Parsers/ParserAlterQuery.cpp b/dbms/src/Parsers/ParserAlterQuery.cpp index a4ffcab7c98..3ff2126bff0 100644 --- a/dbms/src/Parsers/ParserAlterQuery.cpp +++ b/dbms/src/Parsers/ParserAlterQuery.cpp @@ -390,7 +390,7 @@ bool ParserAssignment::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!p_expression.parse(pos, assignment->expression, expected)) return false; - getIdentifierName(column, assignment->column_name); + tryGetIdentifierNameInto(column, assignment->column_name); if (assignment->expression) assignment->children.push_back(assignment->expression); diff --git a/dbms/src/Parsers/ParserCheckQuery.cpp b/dbms/src/Parsers/ParserCheckQuery.cpp index 5ba8119571d..c397e1c33c5 100644 --- a/dbms/src/Parsers/ParserCheckQuery.cpp +++ b/dbms/src/Parsers/ParserCheckQuery.cpp @@ -32,13 +32,13 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!table_parser.parse(pos, table, expected)) return false; - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); } else { table = database; - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(table, query->table); } if (s_partition.ignore(pos, expected)) diff --git a/dbms/src/Parsers/ParserCreateQuery.cpp b/dbms/src/Parsers/ParserCreateQuery.cpp index be0779c4d52..2e7c03a66b9 100644 --- a/dbms/src/Parsers/ParserCreateQuery.cpp +++ b/dbms/src/Parsers/ParserCreateQuery.cpp @@ -39,7 +39,7 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; auto func = std::make_shared(); - getIdentifierName(name, func->name); + tryGetIdentifierNameInto(name, func->name); func->arguments = columns; func->children.push_back(columns); node = func; @@ -74,7 +74,7 @@ bool ParserIdentifierWithOptionalParameters::parseImpl(Pos & pos, ASTPtr & node, if (non_parametric.parse(pos, ident, expected)) { auto func 
= std::make_shared(); - getIdentifierName(ident, func->name); + tryGetIdentifierNameInto(ident, func->name); node = func; return true; } @@ -384,8 +384,8 @@ bool ParserCreateQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) query->if_not_exists = if_not_exists; query->cluster = cluster_str; - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); return true; } @@ -542,18 +542,18 @@ bool ParserCreateQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) query->temporary = is_temporary; query->replace_view = replace_view; - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); query->cluster = cluster_str; - getIdentifierName(to_database, query->to_database); - getIdentifierName(to_table, query->to_table); + tryGetIdentifierNameInto(to_database, query->to_database); + tryGetIdentifierNameInto(to_table, query->to_table); query->set(query->columns_list, columns_list); query->set(query->storage, storage); - getIdentifierName(as_database, query->as_database); - getIdentifierName(as_table, query->as_table); + tryGetIdentifierNameInto(as_database, query->as_database); + tryGetIdentifierNameInto(as_table, query->as_table); query->set(query->select, select); return true; diff --git a/dbms/src/Parsers/ParserCreateQuery.h b/dbms/src/Parsers/ParserCreateQuery.h index 98109ae9893..d95e66566b2 100644 --- a/dbms/src/Parsers/ParserCreateQuery.h +++ b/dbms/src/Parsers/ParserCreateQuery.h @@ -73,7 +73,7 @@ bool IParserNameTypePair::parseImpl(Pos & pos, ASTPtr & node, Expect && type_parser.parse(pos, type, expected)) { auto name_type_pair = std::make_shared(); - getIdentifierName(name, name_type_pair->name); + tryGetIdentifierNameInto(name, name_type_pair->name); name_type_pair->type = type; 
name_type_pair->children.push_back(type); node = name_type_pair; @@ -189,7 +189,7 @@ bool IParserColumnDeclaration::parseImpl(Pos & pos, ASTPtr & node, E const auto column_declaration = std::make_shared(); node = column_declaration; - getIdentifierName(name, column_declaration->name); + tryGetIdentifierNameInto(name, column_declaration->name); if (type) { diff --git a/dbms/src/Parsers/ParserDropQuery.cpp b/dbms/src/Parsers/ParserDropQuery.cpp index ca757ae6168..9cd9744f2f4 100644 --- a/dbms/src/Parsers/ParserDropQuery.cpp +++ b/dbms/src/Parsers/ParserDropQuery.cpp @@ -116,8 +116,8 @@ bool ParserDropQuery::parseDropQuery(Pos & pos, ASTPtr & node, Expected & expect query->if_exists = if_exists; query->temporary = temporary; - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); query->cluster = cluster_str; diff --git a/dbms/src/Parsers/ParserInsertQuery.cpp b/dbms/src/Parsers/ParserInsertQuery.cpp index e86535e8094..f9b38132f18 100644 --- a/dbms/src/Parsers/ParserInsertQuery.cpp +++ b/dbms/src/Parsers/ParserInsertQuery.cpp @@ -147,11 +147,11 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } else { - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); } - getIdentifierName(format, query->format); + tryGetIdentifierNameInto(format, query->format); query->columns = columns; query->select = select; diff --git a/dbms/src/Parsers/ParserInsertQuery.h b/dbms/src/Parsers/ParserInsertQuery.h index 86198365edc..5669d48ffc6 100644 --- a/dbms/src/Parsers/ParserInsertQuery.h +++ b/dbms/src/Parsers/ParserInsertQuery.h @@ -30,7 +30,7 @@ private: const char * getName() const override { return "INSERT query"; } bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) 
override; public: - ParserInsertQuery(const char * end) : end(end) {} + ParserInsertQuery(const char * end_) : end(end_) {} }; } diff --git a/dbms/src/Parsers/ParserOptimizeQuery.cpp b/dbms/src/Parsers/ParserOptimizeQuery.cpp index f749b316794..56e28876133 100644 --- a/dbms/src/Parsers/ParserOptimizeQuery.cpp +++ b/dbms/src/Parsers/ParserOptimizeQuery.cpp @@ -58,8 +58,8 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte auto query = std::make_shared(); node = query; - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); query->cluster = cluster_str; query->partition = partition; diff --git a/dbms/src/Parsers/ParserQuery.h b/dbms/src/Parsers/ParserQuery.h index cf8837cb7be..e9a2aae29a7 100644 --- a/dbms/src/Parsers/ParserQuery.h +++ b/dbms/src/Parsers/ParserQuery.h @@ -16,8 +16,8 @@ private: bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override; public: - ParserQuery(const char * end, bool enable_explain_ = false) - : end(end), + ParserQuery(const char * end_, bool enable_explain_ = false) + : end(end_), enable_explain(enable_explain_) {} }; diff --git a/dbms/src/Parsers/ParserRenameQuery.cpp b/dbms/src/Parsers/ParserRenameQuery.cpp index aa5fb43742b..9323ca16ee4 100644 --- a/dbms/src/Parsers/ParserRenameQuery.cpp +++ b/dbms/src/Parsers/ParserRenameQuery.cpp @@ -30,8 +30,8 @@ static bool parseDatabaseAndTable( } db_and_table.database.clear(); - getIdentifierName(database, db_and_table.database); - getIdentifierName(table, db_and_table.table); + tryGetIdentifierNameInto(database, db_and_table.database); + tryGetIdentifierNameInto(table, db_and_table.table); return true; } diff --git a/dbms/src/Parsers/ParserSetQuery.cpp b/dbms/src/Parsers/ParserSetQuery.cpp index 1bdb4be6014..99b08bff337 100644 --- a/dbms/src/Parsers/ParserSetQuery.cpp +++ b/dbms/src/Parsers/ParserSetQuery.cpp @@ 
-31,7 +31,7 @@ static bool parseNameValuePair(SettingChange & change, IParser::Pos & pos, Expec if (!value_p.parse(pos, value, expected)) return false; - getIdentifierName(name, change.name); + tryGetIdentifierNameInto(name, change.name); change.value = value->as().value; return true; diff --git a/dbms/src/Parsers/ParserShowTablesQuery.cpp b/dbms/src/Parsers/ParserShowTablesQuery.cpp index 9c247a284c1..00e5dcd451e 100644 --- a/dbms/src/Parsers/ParserShowTablesQuery.cpp +++ b/dbms/src/Parsers/ParserShowTablesQuery.cpp @@ -65,7 +65,7 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec return false; } - getIdentifierName(database, query->from); + tryGetIdentifierNameInto(database, query->from); if (like) query->like = safeGet(like->as().value); diff --git a/dbms/src/Parsers/ParserTablePropertiesQuery.cpp b/dbms/src/Parsers/ParserTablePropertiesQuery.cpp index f736023e0d5..a75124a322c 100644 --- a/dbms/src/Parsers/ParserTablePropertiesQuery.cpp +++ b/dbms/src/Parsers/ParserTablePropertiesQuery.cpp @@ -75,8 +75,8 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & } } - getIdentifierName(database, query->database); - getIdentifierName(table, query->table); + tryGetIdentifierNameInto(database, query->database); + tryGetIdentifierNameInto(table, query->table); node = query; diff --git a/dbms/src/Parsers/ParserTablesInSelectQuery.h b/dbms/src/Parsers/ParserTablesInSelectQuery.h index da8bd19c382..9d46fc40fa1 100644 --- a/dbms/src/Parsers/ParserTablesInSelectQuery.h +++ b/dbms/src/Parsers/ParserTablesInSelectQuery.h @@ -19,7 +19,7 @@ protected: class ParserTablesInSelectQueryElement : public IParserBase { public: - ParserTablesInSelectQueryElement(bool is_first) : is_first(is_first) {} + ParserTablesInSelectQueryElement(bool is_first_) : is_first(is_first_) {} protected: const char * getName() const { return "table, table function, subquery or list of joined tables"; } diff --git 
a/dbms/src/Parsers/ParserUseQuery.cpp b/dbms/src/Parsers/ParserUseQuery.cpp index c63a251357c..a71fa17ab78 100644 --- a/dbms/src/Parsers/ParserUseQuery.cpp +++ b/dbms/src/Parsers/ParserUseQuery.cpp @@ -21,7 +21,7 @@ bool ParserUseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; auto query = std::make_shared(); - getIdentifierName(database, query->database); + tryGetIdentifierNameInto(database, query->database); node = query; return true; diff --git a/dbms/src/Parsers/TokenIterator.h b/dbms/src/Parsers/TokenIterator.h index 09724cc46c0..078421c99c9 100644 --- a/dbms/src/Parsers/TokenIterator.h +++ b/dbms/src/Parsers/TokenIterator.h @@ -57,7 +57,7 @@ private: size_t index = 0; public: - explicit TokenIterator(Tokens & tokens) : tokens(&tokens) {} + explicit TokenIterator(Tokens & tokens_) : tokens(&tokens_) {} const Token & get() { return (*tokens)[index]; } const Token & operator*() { return get(); } diff --git a/dbms/src/Parsers/parseDatabaseAndTableName.cpp b/dbms/src/Parsers/parseDatabaseAndTableName.cpp index d7a199a3486..018fee10731 100644 --- a/dbms/src/Parsers/parseDatabaseAndTableName.cpp +++ b/dbms/src/Parsers/parseDatabaseAndTableName.cpp @@ -29,13 +29,13 @@ bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & return false; } - getIdentifierName(database, database_str); - getIdentifierName(table, table_str); + tryGetIdentifierNameInto(database, database_str); + tryGetIdentifierNameInto(table, table_str); } else { database_str = ""; - getIdentifierName(database, table_str); + tryGetIdentifierNameInto(database, table_str); } return true; diff --git a/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp b/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp index 815a5d3f3cc..7258d3e39da 100644 --- a/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp +++ b/dbms/src/Parsers/parseIdentifierOrStringLiteral.cpp @@ -20,7 +20,7 @@ bool parseIdentifierOrStringLiteral(IParser::Pos & pos, Expected & expected, Str 
result = res->as().value.safeGet(); } else - result = *getIdentifierName(res); + result = getIdentifierName(res); return true; } diff --git a/dbms/src/Processors/Executors/PipelineExecutor.cpp b/dbms/src/Processors/Executors/PipelineExecutor.cpp index e45bded427c..31cca4e1a48 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.cpp +++ b/dbms/src/Processors/Executors/PipelineExecutor.cpp @@ -28,8 +28,8 @@ static bool checkCanAddAdditionalInfoToException(const DB::Exception & exception && exception.code() != ErrorCodes::QUERY_WAS_CANCELLED; } -PipelineExecutor::PipelineExecutor(Processors & processors) - : processors(processors) +PipelineExecutor::PipelineExecutor(Processors & processors_) + : processors(processors_) , cancelled(false) , finished(false) , num_processing_executors(0) diff --git a/dbms/src/Processors/Executors/PipelineExecutor.h b/dbms/src/Processors/Executors/PipelineExecutor.h index 02149cb042f..e448d5e00dd 100644 --- a/dbms/src/Processors/Executors/PipelineExecutor.h +++ b/dbms/src/Processors/Executors/PipelineExecutor.h @@ -24,7 +24,7 @@ public: /// During pipeline execution new processors can appear. They will be added to existing set. /// /// Explicit graph representation is built in constructor. Throws if graph is not correct. - explicit PipelineExecutor(Processors & processors); + explicit PipelineExecutor(Processors & processors_); /// Execute pipeline in multiple threads. Must be called once. /// In case of exception during execution throws any occurred. 
diff --git a/dbms/src/Processors/Formats/IInputFormat.h b/dbms/src/Processors/Formats/IInputFormat.h index ed26f60c058..424aed455a0 100644 --- a/dbms/src/Processors/Formats/IInputFormat.h +++ b/dbms/src/Processors/Formats/IInputFormat.h @@ -23,8 +23,8 @@ protected: #pragma GCC diagnostic pop public: - IInputFormat(Block header, ReadBuffer & in) - : ISource(std::move(header)), in(in) + IInputFormat(Block header, ReadBuffer & in_) + : ISource(std::move(header)), in(in_) { } diff --git a/dbms/src/Processors/Formats/IOutputFormat.cpp b/dbms/src/Processors/Formats/IOutputFormat.cpp index 63e846aa796..971ad95d946 100644 --- a/dbms/src/Processors/Formats/IOutputFormat.cpp +++ b/dbms/src/Processors/Formats/IOutputFormat.cpp @@ -5,8 +5,8 @@ namespace DB { -IOutputFormat::IOutputFormat(const Block & header, WriteBuffer & out) - : IProcessor({header, header, header}, {}), out(out) +IOutputFormat::IOutputFormat(const Block & header_, WriteBuffer & out_) + : IProcessor({header_, header_, header_}, {}), out(out_) { } diff --git a/dbms/src/Processors/Formats/IOutputFormat.h b/dbms/src/Processors/Formats/IOutputFormat.h index 53e5b9e2158..5200b897643 100644 --- a/dbms/src/Processors/Formats/IOutputFormat.h +++ b/dbms/src/Processors/Formats/IOutputFormat.h @@ -39,7 +39,7 @@ protected: virtual void finalize() {} public: - IOutputFormat(const Block & header, WriteBuffer & out); + IOutputFormat(const Block & header_, WriteBuffer & out_); Status prepare() override; void work() override; diff --git a/dbms/src/Processors/Formats/IRowInputFormat.h b/dbms/src/Processors/Formats/IRowInputFormat.h index 26d1a11a657..72a6c813701 100644 --- a/dbms/src/Processors/Formats/IRowInputFormat.h +++ b/dbms/src/Processors/Formats/IRowInputFormat.h @@ -45,8 +45,8 @@ public: IRowInputFormat( Block header, ReadBuffer & in_, - Params params) - : IInputFormat(std::move(header), in_), params(params) + Params params_) + : IInputFormat(std::move(header), in_), params(params_) { } diff --git 
a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp index 20f40fe1e41..53e00d295f1 100644 --- a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.cpp @@ -7,8 +7,8 @@ namespace DB { -BinaryRowInputFormat::BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names_, bool with_types_) - : IRowInputFormat(std::move(header), in_, params), with_names(with_names_), with_types(with_types_) +BinaryRowInputFormat::BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params_, bool with_names_, bool with_types_) + : IRowInputFormat(std::move(header), in_, params_), with_names(with_names_), with_types(with_types_) { } diff --git a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h index 9a5a3fe63e1..e96a516c1a7 100644 --- a/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/BinaryRowInputFormat.h @@ -15,7 +15,7 @@ class ReadBuffer; class BinaryRowInputFormat : public IRowInputFormat { public: - BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names_, bool with_types_); + BinaryRowInputFormat(ReadBuffer & in_, Block header, Params params_, bool with_names_, bool with_types_); bool readRow(MutableColumns & columns, RowReadExtension &) override; void readPrefix() override; diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp index 701878ff57b..b13436a6600 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.cpp @@ -18,10 +18,10 @@ namespace ErrorCodes CSVRowInputFormat::CSVRowInputFormat( - ReadBuffer & in_, Block header, Params params, bool with_names_, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, 
std::move(params)) + ReadBuffer & in_, Block header_, Params params_, bool with_names_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)) , with_names(with_names_) - , format_settings(format_settings) + , format_settings(format_settings_) { auto & sample = getPort().getHeader(); size_t num_columns = sample.columns(); @@ -40,7 +40,7 @@ CSVRowInputFormat::CSVRowInputFormat( /// If input_format_null_as_default=1 we need ColumnNullable of type DataTypeNullable(nested_type) /// to parse value as nullable before inserting it in corresponding column of not-nullable type. /// Constructing temporary column for each row is slow, so we prepare it here - if (format_settings.csv.null_as_default && !column_info.type->isNullable() && column_info.type->canBeInsideNullable()) + if (format_settings_.csv.null_as_default && !column_info.type->isNullable() && column_info.type->canBeInsideNullable()) { column_idx_to_nullable_column_idx[i] = nullable_columns.size(); nullable_types.emplace_back(std::make_shared(column_info.type)); diff --git a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h index 6935325f01f..59b24ae0140 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CSVRowInputFormat.h @@ -19,7 +19,7 @@ public: /** with_names - in the first line the header with column names * with_types - on the next line header with type names */ - CSVRowInputFormat(ReadBuffer & in_, Block header, Params params, bool with_names, const FormatSettings & format_settings); + CSVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, bool with_names_, const FormatSettings & format_settings_); String getName() const override { return "CSVRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp index c5246dfb1cc..8f17b8f15c8 100644 --- 
a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), with_names(with_names_), format_settings(format_settings) +CSVRowOutputFormat::CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), with_names(with_names_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h index 5593fc98455..803d3aa80a6 100644 --- a/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CSVRowOutputFormat.h @@ -20,7 +20,7 @@ public: /** with_names - output in the first line a header with column names * with_types - output in the next line header with the names of the types */ - CSVRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, const FormatSettings & format_settings); + CSVRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, const FormatSettings & format_settings_); String getName() const override { return "CSVRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index 42ef04b64b1..2652304fcb0 100644 --- a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -178,8 +178,8 @@ void CapnProtoRowInputFormat::createActions(const NestedFieldList & sorted_field } } -CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params, const 
FormatSchemaInfo & info) - : IRowInputFormat(std::move(header), in_, std::move(params)), parser(std::make_shared()) +CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info) + : IRowInputFormat(std::move(header), in_, std::move(params_)), parser(std::make_shared()) { // Parse the schema and fetch the root object diff --git a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h index b941ceb514d..b7021ea7db7 100644 --- a/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/CapnProtoRowInputFormat.h @@ -33,7 +33,7 @@ public: * schema_file - location of the capnproto schema, e.g. "schema.capnp" * root_object - name to the root object, e.g. "Message" */ - CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSchemaInfo & info); + CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info); String getName() const override { return "CapnProtoRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp index 100edb20f37..9730ae3f3cc 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp @@ -27,8 +27,8 @@ enum JSONEachRowRowInputFormat::JSONEachRowRowInputFormat( - ReadBuffer & in_, const Block & header, Params params, const FormatSettings & format_settings) - : IRowInputFormat(header, in_, std::move(params)), format_settings(format_settings), name_map(header.columns()) + ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(header_, in_, std::move(params_)), format_settings(format_settings_), name_map(header_.columns()) { /// In this format, BOM at beginning of stream 
cannot be confused with value, so it is safe to skip it. skipBOMIfExists(in); @@ -38,7 +38,7 @@ JSONEachRowRowInputFormat::JSONEachRowRowInputFormat( { const String & column_name = columnName(i); name_map[column_name] = i; /// NOTE You could place names more cache-locally. - if (format_settings.import_nested_json) + if (format_settings_.import_nested_json) { const auto splitted = Nested::splitName(column_name); if (!splitted.second.empty()) diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h index 1aed7c9dc49..17711b5f27d 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h @@ -20,7 +20,7 @@ class ReadBuffer; class JSONEachRowRowInputFormat : public IRowInputFormat { public: - JSONEachRowRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSettings & format_settings); + JSONEachRowRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "JSONEachRowRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp index 112021dce42..cf23e06c9a6 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -JSONEachRowRowOutputFormat::JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings) - : IRowOutputFormat(header, out_), settings(settings) +JSONEachRowRowOutputFormat::JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & settings_) + : IRowOutputFormat(header_, out_), settings(settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); 
diff --git a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h index a45f193ea39..66b3fa88652 100644 --- a/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/JSONEachRowRowOutputFormat.h @@ -15,7 +15,7 @@ namespace DB class JSONEachRowRowOutputFormat : public IRowOutputFormat { public: - JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings); + JSONEachRowRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & settings_); String getName() const override { return "JSONEachRowRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp index 2e48d0643e9..f046c810fbd 100644 --- a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.cpp @@ -14,13 +14,13 @@ namespace DB using namespace MySQLProtocol; -MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header, const Context & context, const FormatSettings & settings) - : IOutputFormat(header, out_) - , context(context) - , packet_sender(out, const_cast(context.mysql.sequence_id)) /// TODO: fix it - , format_settings(settings) +MySQLOutputFormat::MySQLOutputFormat(WriteBuffer & out_, const Block & header_, const Context & context_, const FormatSettings & settings_) + : IOutputFormat(header_, out_) + , context(context_) + , packet_sender(out, const_cast(context_.mysql.sequence_id)) /// TODO: fix it + , format_settings(settings_) { - packet_sender.max_packet_size = context.mysql.max_packet_size; + packet_sender.max_packet_size = context_.mysql.max_packet_size; } void MySQLOutputFormat::initialize() diff --git a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h index e6b319f659a..d5691936862 100644 --- 
a/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/MySQLOutputFormat.h @@ -19,7 +19,7 @@ class Context; class MySQLOutputFormat: public IOutputFormat { public: - MySQLOutputFormat(WriteBuffer & out_, const Block & header, const Context & context, const FormatSettings & settings); + MySQLOutputFormat(WriteBuffer & out_, const Block & header_, const Context & context_, const FormatSettings & settings_); String getName() const override { return "MySQLOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp index d2a4842fa24..7f30c5bfdc4 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.cpp @@ -12,8 +12,8 @@ namespace DB { ODBCDriver2BlockOutputFormat::ODBCDriver2BlockOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h index 5a6ed4efc09..7510ce4640a 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ODBCDriver2BlockOutputFormat.h @@ -20,7 +20,7 @@ class WriteBuffer; class ODBCDriver2BlockOutputFormat final : public IOutputFormat { public: - ODBCDriver2BlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ODBCDriver2BlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ODBCDriver2BlockOutputFormat"; } diff --git 
a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp index fc8796c8799..3f84bacbfaf 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { -ODBCDriverBlockOutputFormat::ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings(format_settings) +ODBCDriverBlockOutputFormat::ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h index 3a0e6e29c40..768b8f2683d 100644 --- a/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ODBCDriverBlockOutputFormat.h @@ -21,7 +21,7 @@ class WriteBuffer; class ODBCDriverBlockOutputFormat : public IOutputFormat { public: - ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ODBCDriverBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ODBCDriverBlockOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp index e5f08c8d645..a2f2fd33e24 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp @@ -45,8 +45,8 @@ namespace ErrorCodes extern const int THERE_IS_NO_COLUMN; } -ParquetBlockInputFormat::ParquetBlockInputFormat(ReadBuffer & in_, Block header, const Context & context) - : 
IInputFormat(std::move(header), in_), context{context} +ParquetBlockInputFormat::ParquetBlockInputFormat(ReadBuffer & in_, Block header_, const Context & context_) + : IInputFormat(std::move(header_), in_), context{context_} { } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h index 8fa9013fbd1..172d3a365d4 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockInputFormat.h @@ -16,7 +16,7 @@ class Context; class ParquetBlockInputFormat: public IInputFormat { public: - ParquetBlockInputFormat(ReadBuffer & in_, Block header, const Context & context); + ParquetBlockInputFormat(ReadBuffer & in_, Block header_, const Context & context_); String getName() const override { return "ParquetBlockInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp index 3a3540ede7e..e8196c5bf59 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.cpp @@ -35,8 +35,8 @@ namespace ErrorCodes extern const int UNKNOWN_TYPE; } -ParquetBlockOutputFormat::ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IOutputFormat(header, out_), format_settings{format_settings} +ParquetBlockOutputFormat::ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings{format_settings_} { } diff --git a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h index f7ca6f11f00..11d746a0a6d 100644 --- a/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ParquetBlockOutputFormat.h @@ -24,7 +24,7 @@ namespace DB class 
ParquetBlockOutputFormat : public IOutputFormat { public: - ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ParquetBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ParquetBlockOutputFormat"; } void consume(Chunk) override; diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp index 6868a3b2987..84c4fc7cbc3 100644 --- a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.cpp @@ -18,8 +18,8 @@ namespace ErrorCodes PrettyBlockOutputFormat::PrettyBlockOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings_) - : IOutputFormat(header, out_), format_settings(format_settings_) + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IOutputFormat(header_, out_), format_settings(format_settings_) { struct winsize w; if (0 == ioctl(STDOUT_FILENO, TIOCGWINSZ, &w)) @@ -54,8 +54,8 @@ void PrettyBlockOutputFormat::calculateWidths( for (size_t j = 0; j < num_rows; ++j) { { - WriteBufferFromString out(serialized_value); - elem.type->serializeAsText(*column, j, out, format_settings); + WriteBufferFromString out_(serialized_value); + elem.type->serializeAsText(*column, j, out_, format_settings); } widths[i][j] = std::min(format_settings.pretty.max_column_pad_width, diff --git a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h index 34bbbc3000c..eae1c3e9eb1 100644 --- a/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/PrettyBlockOutputFormat.h @@ -18,7 +18,7 @@ class PrettyBlockOutputFormat : public IOutputFormat { public: /// no_escapes - do not use ANSI escape sequences - to display 
in the browser, not in the console. - PrettyBlockOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings_); + PrettyBlockOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "PrettyBlockOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp index 4f535308b8f..09410a06c0c 100644 --- a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp @@ -11,10 +11,10 @@ namespace DB { -ProtobufRowInputFormat::ProtobufRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSchemaInfo & info) - : IRowInputFormat(header, in_, params) - , data_types(header.getDataTypes()) - , reader(in, ProtobufSchemas::instance().getMessageTypeForFormatSchema(info), header.getNames()) +ProtobufRowInputFormat::ProtobufRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSchemaInfo & info_) + : IRowInputFormat(header_, in_, params_) + , data_types(header_.getDataTypes()) + , reader(in, ProtobufSchemas::instance().getMessageTypeForFormatSchema(info_), header_.getNames()) { } diff --git a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h index 89ace7fec90..ebc2283d25c 100644 --- a/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ProtobufRowInputFormat.h @@ -24,7 +24,7 @@ class FormatSchemaInfo; class ProtobufRowInputFormat : public IRowInputFormat { public: - ProtobufRowInputFormat(ReadBuffer & in_, const Block & header, Params params, const FormatSchemaInfo & info); + ProtobufRowInputFormat(ReadBuffer & in_, const Block & header_, Params params_, const FormatSchemaInfo & info_); ~ProtobufRowInputFormat() override; String getName() const 
override { return "ProtobufRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp index b03480834c5..35a0b4b7a7c 100644 --- a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.cpp @@ -15,8 +15,8 @@ namespace ErrorCodes } -TSKVRowInputFormat::TSKVRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, std::move(params)), format_settings(format_settings), name_map(header.columns()) +TSKVRowInputFormat::TSKVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)), format_settings(format_settings_), name_map(header_.columns()) { /// In this format, we assume that column name cannot contain BOM, /// so BOM at beginning of stream cannot be confused with name of field, and it is safe to skip it. 
diff --git a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h index 4d9c55f6efc..52330665395 100644 --- a/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TSKVRowInputFormat.h @@ -23,7 +23,7 @@ class ReadBuffer; class TSKVRowInputFormat : public IRowInputFormat { public: - TSKVRowInputFormat(ReadBuffer & in_, Block header, Params params, const FormatSettings & format_settings); + TSKVRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "TSKVRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h index 2d2d7cf4ad4..c2cf31fc196 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRawRowOutputFormat.h @@ -13,8 +13,8 @@ namespace DB class TabSeparatedRawRowOutputFormat : public TabSeparatedRowOutputFormat { public: - TabSeparatedRawRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names_, bool with_types_, const FormatSettings & format_settings_) - : TabSeparatedRowOutputFormat(out_, header, with_names_, with_types_, format_settings_) {} + TabSeparatedRawRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : TabSeparatedRowOutputFormat(out_, header_, with_names_, with_types_, format_settings_) {} String getName() const override { return "TabSeparatedRawRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp index 5834d46b322..39c06c9441b 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp @@ 
-42,8 +42,8 @@ static void checkForCarriageReturn(ReadBuffer & istr) TabSeparatedRowInputFormat::TabSeparatedRowInputFormat( - ReadBuffer & in_, Block header, bool with_names, bool with_types, Params params, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, std::move(params)), with_names(with_names), with_types(with_types), format_settings(format_settings) + ReadBuffer & in_, Block header_, bool with_names_, bool with_types_, Params params_, const FormatSettings & format_settings_) + : IRowInputFormat(std::move(header_), in_, std::move(params_)), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { auto & sample = getPort().getHeader(); size_t num_columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h index 47256e0b9a7..076cbb60152 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowInputFormat.h @@ -20,7 +20,7 @@ public: * with_types - on the next line header with type names */ TabSeparatedRowInputFormat( - ReadBuffer & in_, Block header, bool with_names, bool with_types, Params params, const FormatSettings & format_settings); + ReadBuffer & in_, Block header_, bool with_names_, bool with_types_, Params params_, const FormatSettings & format_settings_); String getName() const override { return "TabSeparatedRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp index 608f2e8b5d0..92058323102 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.cpp @@ -8,8 +8,8 @@ namespace DB { TabSeparatedRowOutputFormat::TabSeparatedRowOutputFormat( - WriteBuffer & out_, const Block & header, bool with_names, bool with_types, const 
FormatSettings & format_settings) - : IRowOutputFormat(header, out_), with_names(with_names), with_types(with_types), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), with_names(with_names_), with_types(with_types_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h index 7ebe12bc30d..a00cd2d8fdd 100644 --- a/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/TabSeparatedRowOutputFormat.h @@ -18,7 +18,7 @@ public: /** with_names - output in the first line a header with column names * with_types - output the next line header with the names of the types */ - TabSeparatedRowOutputFormat(WriteBuffer & out_, const Block & header, bool with_names, bool with_types, const FormatSettings & format_settings); + TabSeparatedRowOutputFormat(WriteBuffer & out_, const Block & header_, bool with_names_, bool with_types_, const FormatSettings & format_settings_); String getName() const override { return "TabSeparatedRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp index 337085198a3..5eeb29d91e7 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.cpp @@ -29,9 +29,9 @@ namespace ErrorCodes ValuesRowInputFormat::ValuesRowInputFormat( - ReadBuffer & in_, Block header, Params params, const Context & context_, const FormatSettings & format_settings) - : IRowInputFormat(std::move(header), in_, params) - , context(std::make_unique(context_)), format_settings(format_settings) + ReadBuffer & in_, Block header_, Params params_, const Context & context_, const FormatSettings & format_settings_) + : 
IRowInputFormat(std::move(header_), in_, params_) + , context(std::make_unique(context_)), format_settings(format_settings_) { /// In this format, BOM at beginning of stream cannot be confused with value, so it is safe to skip it. skipBOMIfExists(in); diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h index f7ad3b470e6..81ad0c5319e 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ValuesRowInputFormat.h @@ -21,7 +21,7 @@ public: * If interpret_expressions is true, it will, in addition, try to use SQL parser and interpreter * in case when streaming parser could not parse field (this is very slow). */ - ValuesRowInputFormat(ReadBuffer & in_, Block header, Params params, const Context & context_, const FormatSettings & format_settings); + ValuesRowInputFormat(ReadBuffer & in_, Block header_, Params params_, const Context & context_, const FormatSettings & format_settings_); String getName() const override { return "ValuesRowInputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp index 234a9da5c67..80c4135dfb3 100644 --- a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.cpp @@ -10,8 +10,8 @@ namespace DB { -ValuesRowOutputFormat::ValuesRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) +ValuesRowOutputFormat::ValuesRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { } diff --git a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h index 5f82e78d3c0..f7a28002c92 100644 --- 
a/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/ValuesRowOutputFormat.h @@ -15,7 +15,7 @@ class WriteBuffer; class ValuesRowOutputFormat : public IRowOutputFormat { public: - ValuesRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + ValuesRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "ValuesRowOutputFormat"; } diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp index 55f04584c19..744ad2d0953 100644 --- a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.cpp @@ -11,8 +11,8 @@ namespace DB { VerticalRowOutputFormat::VerticalRowOutputFormat( - WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) + WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); size_t columns = sample.columns(); diff --git a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h index a535d1e9c5b..2a0d248bab8 100644 --- a/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/VerticalRowOutputFormat.h @@ -18,7 +18,7 @@ class Context; class VerticalRowOutputFormat : public IRowOutputFormat { public: - VerticalRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + VerticalRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "VerticalRowOutputFormat"; } diff --git 
a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp index 5df58a5c733..545f80692cc 100644 --- a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp +++ b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.cpp @@ -7,8 +7,8 @@ namespace DB { -XMLRowOutputFormat::XMLRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings) - : IRowOutputFormat(header, out_), format_settings(format_settings) +XMLRowOutputFormat::XMLRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_) + : IRowOutputFormat(header_, out_), format_settings(format_settings_) { auto & sample = getPort(PortKind::Main).getHeader(); NamesAndTypesList columns(sample.getNamesAndTypesList()); diff --git a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h index 102b11490fe..b2370090c32 100644 --- a/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h +++ b/dbms/src/Processors/Formats/Impl/XMLRowOutputFormat.h @@ -16,7 +16,7 @@ namespace DB class XMLRowOutputFormat : public IRowOutputFormat { public: - XMLRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & format_settings); + XMLRowOutputFormat(WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_); String getName() const override { return "XMLRowOutputFormat"; } diff --git a/dbms/src/Processors/ISimpleTransform.cpp b/dbms/src/Processors/ISimpleTransform.cpp index 39736973a16..292ea24b0ef 100644 --- a/dbms/src/Processors/ISimpleTransform.cpp +++ b/dbms/src/Processors/ISimpleTransform.cpp @@ -4,11 +4,11 @@ namespace DB { -ISimpleTransform::ISimpleTransform(Block input_header, Block output_header, bool skip_empty_chunks) - : IProcessor({std::move(input_header)}, {std::move(output_header)}) +ISimpleTransform::ISimpleTransform(Block input_header_, Block output_header_, bool skip_empty_chunks_) + : 
IProcessor({std::move(input_header_)}, {std::move(output_header_)}) , input(inputs.front()) , output(outputs.front()) - , skip_empty_chunks(skip_empty_chunks) + , skip_empty_chunks(skip_empty_chunks_) { } diff --git a/dbms/src/Processors/ISimpleTransform.h b/dbms/src/Processors/ISimpleTransform.h index 82e383ceeb0..0a952840964 100644 --- a/dbms/src/Processors/ISimpleTransform.h +++ b/dbms/src/Processors/ISimpleTransform.h @@ -30,7 +30,7 @@ protected: void stopReading() { no_more_data_needed = true; } public: - ISimpleTransform(Block input_header, Block output_header, bool skip_empty_chunks); + ISimpleTransform(Block input_header_, Block output_header_, bool skip_empty_chunks_); Status prepare() override; void work() override; diff --git a/dbms/src/Processors/LimitTransform.cpp b/dbms/src/Processors/LimitTransform.cpp index f591ecfb046..1be10c405bb 100644 --- a/dbms/src/Processors/LimitTransform.cpp +++ b/dbms/src/Processors/LimitTransform.cpp @@ -5,12 +5,12 @@ namespace DB { LimitTransform::LimitTransform( - const Block & header, size_t limit, size_t offset, - bool always_read_till_end) - : IProcessor({header}, {header}) + const Block & header_, size_t limit_, size_t offset_, + bool always_read_till_end_) + : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , limit(limit), offset(offset) - , always_read_till_end(always_read_till_end) + , limit(limit_), offset(offset_) + , always_read_till_end(always_read_till_end_) { } diff --git a/dbms/src/Processors/LimitTransform.h b/dbms/src/Processors/LimitTransform.h index eb5a8fe8d5a..f80ca263c95 100644 --- a/dbms/src/Processors/LimitTransform.h +++ b/dbms/src/Processors/LimitTransform.h @@ -25,8 +25,8 @@ private: public: LimitTransform( - const Block & header, size_t limit, size_t offset, - bool always_read_till_end = false); + const Block & header_, size_t limit_, size_t offset_, + bool always_read_till_end_ = false); String getName() const override { return "Limit"; } diff --git 
a/dbms/src/Processors/Port.h b/dbms/src/Processors/Port.h index 99ad7df4b50..1758327e914 100644 --- a/dbms/src/Processors/Port.h +++ b/dbms/src/Processors/Port.h @@ -179,8 +179,8 @@ protected: public: using Data = State::Data; - Port(Block header) : header(std::move(header)) {} - Port(Block header, IProcessor * processor) : header(std::move(header)), processor(processor) {} + Port(Block header_) : header(std::move(header_)) {} + Port(Block header_, IProcessor * processor_) : header(std::move(header_)), processor(processor_) {} const Block & getHeader() const { return header; } bool ALWAYS_INLINE isConnected() const { return state != nullptr; } diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.cpp b/dbms/src/Processors/Sources/SourceFromInputStream.cpp index d9d74a5cde6..f60bc703ec0 100644 --- a/dbms/src/Processors/Sources/SourceFromInputStream.cpp +++ b/dbms/src/Processors/Sources/SourceFromInputStream.cpp @@ -6,9 +6,9 @@ namespace DB { -SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info) +SourceFromInputStream::SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_) : ISource(stream_->getHeader()) - , force_add_aggregating_info(force_add_aggregating_info) + , force_add_aggregating_info(force_add_aggregating_info_) , stream(std::move(stream_)) { auto & sample = getPort().getHeader(); diff --git a/dbms/src/Processors/Sources/SourceFromInputStream.h b/dbms/src/Processors/Sources/SourceFromInputStream.h index 46e0b3fb04b..0e6c698f260 100644 --- a/dbms/src/Processors/Sources/SourceFromInputStream.h +++ b/dbms/src/Processors/Sources/SourceFromInputStream.h @@ -10,7 +10,7 @@ using BlockInputStreamPtr = std::shared_ptr; class SourceFromInputStream : public ISource { public: - explicit SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info = false); + explicit SourceFromInputStream(BlockInputStreamPtr stream_, bool force_add_aggregating_info_ = 
false); String getName() const override { return "SourceFromInputStream"; } Status prepare() override; diff --git a/dbms/src/Processors/Transforms/AggregatingTransform.cpp b/dbms/src/Processors/Transforms/AggregatingTransform.cpp index 5993584f5c9..55fe66b7d9f 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.cpp +++ b/dbms/src/Processors/Transforms/AggregatingTransform.cpp @@ -61,8 +61,8 @@ namespace class ConvertingAggregatedToBlocksTransform : public ISource { public: - ConvertingAggregatedToBlocksTransform(Block header, AggregatingTransformParamsPtr params_, BlockInputStreamPtr stream) - : ISource(std::move(header)), params(std::move(params_)), stream(std::move(stream)) {} + ConvertingAggregatedToBlocksTransform(Block header, AggregatingTransformParamsPtr params_, BlockInputStreamPtr stream_) + : ISource(std::move(header)), params(std::move(params_)), stream(std::move(stream_)) {} String getName() const override { return "ConvertingAggregatedToBlocksTransform"; } @@ -99,15 +99,15 @@ AggregatingTransform::AggregatingTransform(Block header, AggregatingTransformPar AggregatingTransform::AggregatingTransform( Block header, AggregatingTransformParamsPtr params_, ManyAggregatedDataPtr many_data_, - size_t current_variant, size_t temporary_data_merge_threads, size_t max_threads) + size_t current_variant, size_t temporary_data_merge_threads_, size_t max_threads_) : IProcessor({std::move(header)}, {params_->getHeader()}), params(std::move(params_)) , key(params->params.keys_size) , key_columns(params->params.keys_size) , aggregate_columns(params->params.aggregates_size) , many_data(std::move(many_data_)) , variants(*many_data->variants[current_variant]) - , max_threads(std::min(many_data->variants.size(), max_threads)) - , temporary_data_merge_threads(temporary_data_merge_threads) + , max_threads(std::min(many_data->variants.size(), max_threads_)) + , temporary_data_merge_threads(temporary_data_merge_threads_) { } diff --git 
a/dbms/src/Processors/Transforms/AggregatingTransform.h b/dbms/src/Processors/Transforms/AggregatingTransform.h index 64ba10e1801..17786ccfa1a 100644 --- a/dbms/src/Processors/Transforms/AggregatingTransform.h +++ b/dbms/src/Processors/Transforms/AggregatingTransform.h @@ -24,8 +24,8 @@ struct AggregatingTransformParams Aggregator aggregator; bool final; - AggregatingTransformParams(const Aggregator::Params & params, bool final) - : params(params), aggregator(params), final(final) {} + AggregatingTransformParams(const Aggregator::Params & params_, bool final_) + : params(params_), aggregator(params), final(final_) {} Block getHeader() const { return aggregator.getHeader(final); } }; diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.cpp b/dbms/src/Processors/Transforms/ConvertingTransform.cpp index 49dbb748591..8729b896084 100644 --- a/dbms/src/Processors/Transforms/ConvertingTransform.cpp +++ b/dbms/src/Processors/Transforms/ConvertingTransform.cpp @@ -33,12 +33,12 @@ static ColumnPtr castColumnWithDiagnostic( } ConvertingTransform::ConvertingTransform( - Block source_header, - Block result_header, - MatchColumnsMode mode, - const Context & context) - : ISimpleTransform(std::move(source_header), std::move(result_header), false) - , context(context) + Block source_header_, + Block result_header_, + MatchColumnsMode mode_, + const Context & context_) + : ISimpleTransform(std::move(source_header_), std::move(result_header_), false) + , context(context_) , conversion(getOutputPort().getHeader().columns()) { auto & source = getInputPort().getHeader(); @@ -47,14 +47,14 @@ ConvertingTransform::ConvertingTransform( size_t num_input_columns = source.columns(); size_t num_result_columns = result.columns(); - if (mode == MatchColumnsMode::Position && num_input_columns != num_result_columns) + if (mode_ == MatchColumnsMode::Position && num_input_columns != num_result_columns) throw Exception("Number of columns doesn't match", 
ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH); for (size_t result_col_num = 0; result_col_num < num_result_columns; ++result_col_num) { const auto & res_elem = result.getByPosition(result_col_num); - switch (mode) + switch (mode_) { case MatchColumnsMode::Position: conversion[result_col_num] = result_col_num; diff --git a/dbms/src/Processors/Transforms/ConvertingTransform.h b/dbms/src/Processors/Transforms/ConvertingTransform.h index d6e6219316a..b2412802ed6 100644 --- a/dbms/src/Processors/Transforms/ConvertingTransform.h +++ b/dbms/src/Processors/Transforms/ConvertingTransform.h @@ -29,10 +29,10 @@ public: }; ConvertingTransform( - Block source_header, - Block result_header, - MatchColumnsMode mode, - const Context & context); + Block source_header_, + Block result_header_, + MatchColumnsMode mode_, + const Context & context_); String getName() const override { return "Converting"; } diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp b/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp index 29bc4030b81..71fe743fd49 100644 --- a/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/dbms/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -21,15 +21,15 @@ namespace ErrorCodes CreatingSetsTransform::CreatingSetsTransform( - Block out_header, + Block out_header_, const SubqueriesForSets & subqueries_for_sets_, - const SizeLimits & network_transfer_limits, - const Context & context) - : IProcessor({}, {std::move(out_header)}) + const SizeLimits & network_transfer_limits_, + const Context & context_) + : IProcessor({}, {std::move(out_header_)}) , subqueries_for_sets(subqueries_for_sets_) , cur_subquery(subqueries_for_sets.begin()) - , network_transfer_limits(network_transfer_limits) - , context(context) + , network_transfer_limits(network_transfer_limits_) + , context(context_) { } diff --git a/dbms/src/Processors/Transforms/CreatingSetsTransform.h b/dbms/src/Processors/Transforms/CreatingSetsTransform.h index 
b5f7ea63748..00f64440393 100644 --- a/dbms/src/Processors/Transforms/CreatingSetsTransform.h +++ b/dbms/src/Processors/Transforms/CreatingSetsTransform.h @@ -17,10 +17,10 @@ class CreatingSetsTransform : public IProcessor { public: CreatingSetsTransform( - Block out_header, + Block out_header_, const SubqueriesForSets & subqueries_for_sets_, - const SizeLimits & network_transfer_limits, - const Context & context); + const SizeLimits & network_transfer_limits_, + const Context & context_); String getName() const override { return "CreatingSetsTransform"; } Status prepare() override; diff --git a/dbms/src/Processors/Transforms/DistinctTransform.cpp b/dbms/src/Processors/Transforms/DistinctTransform.cpp index 7cd9a54e055..f9383f1a5e5 100644 --- a/dbms/src/Processors/Transforms/DistinctTransform.cpp +++ b/dbms/src/Processors/Transforms/DistinctTransform.cpp @@ -9,23 +9,23 @@ namespace ErrorCodes } DistinctTransform::DistinctTransform( - const Block & header, - const SizeLimits & set_size_limits, - UInt64 limit_hint, - const Names & columns) - : ISimpleTransform(header, header, true) - , limit_hint(limit_hint) - , set_size_limits(set_size_limits) + const Block & header_, + const SizeLimits & set_size_limits_, + UInt64 limit_hint_, + const Names & columns_) + : ISimpleTransform(header_, header_, true) + , limit_hint(limit_hint_) + , set_size_limits(set_size_limits_) { - size_t num_columns = columns.empty() ? header.columns() : columns.size(); + size_t num_columns = columns_.empty() ? header_.columns() : columns_.size(); - key_columns_pos.reserve(columns.size()); + key_columns_pos.reserve(columns_.size()); for (size_t i = 0; i < num_columns; ++i) { - auto pos = columns.empty() ? i - : header.getPositionByName(columns[i]); + auto pos = columns_.empty() ? 
i + : header_.getPositionByName(columns_[i]); - auto & col = header.getByPosition(pos).column; + auto & col = header_.getByPosition(pos).column; if (!(col && isColumnConst(*col))) key_columns_pos.emplace_back(pos); diff --git a/dbms/src/Processors/Transforms/DistinctTransform.h b/dbms/src/Processors/Transforms/DistinctTransform.h index 05ef9e9c334..236f9026c63 100644 --- a/dbms/src/Processors/Transforms/DistinctTransform.h +++ b/dbms/src/Processors/Transforms/DistinctTransform.h @@ -11,10 +11,10 @@ class DistinctTransform : public ISimpleTransform { public: DistinctTransform( - const Block & header, - const SizeLimits & set_size_limits, - UInt64 limit_hint, - const Names & columns); + const Block & header_, + const SizeLimits & set_size_limits_, + UInt64 limit_hint_, + const Names & columns_); String getName() const override { return "DistinctTransform"; } diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.cpp b/dbms/src/Processors/Transforms/ExpressionTransform.cpp index c42ef92b085..2ae9dd6f57f 100644 --- a/dbms/src/Processors/Transforms/ExpressionTransform.cpp +++ b/dbms/src/Processors/Transforms/ExpressionTransform.cpp @@ -11,11 +11,11 @@ static Block transformHeader(Block header, const ExpressionActionsPtr & expressi } -ExpressionTransform::ExpressionTransform(const Block & header, ExpressionActionsPtr expression, bool on_totals, bool default_totals) - : ISimpleTransform(header, transformHeader(header, expression), on_totals) - , expression(std::move(expression)) - , on_totals(on_totals) - , default_totals(default_totals) +ExpressionTransform::ExpressionTransform(const Block & header_, ExpressionActionsPtr expression_, bool on_totals_, bool default_totals_) + : ISimpleTransform(header_, transformHeader(header_, expression), on_totals_) + , expression(std::move(expression_)) + , on_totals(on_totals_) + , default_totals(default_totals_) { } diff --git a/dbms/src/Processors/Transforms/ExpressionTransform.h 
b/dbms/src/Processors/Transforms/ExpressionTransform.h index 8face634f96..6c6d474d872 100644 --- a/dbms/src/Processors/Transforms/ExpressionTransform.h +++ b/dbms/src/Processors/Transforms/ExpressionTransform.h @@ -10,7 +10,7 @@ using ExpressionActionsPtr = std::shared_ptr; class ExpressionTransform : public ISimpleTransform { public: - ExpressionTransform(const Block & header, ExpressionActionsPtr expression, bool on_totals = false, bool default_totals = false); + ExpressionTransform(const Block & header_, ExpressionActionsPtr expression_, bool on_totals_ = false, bool default_totals_ = false); String getName() const override { return "ExpressionTransform"; } diff --git a/dbms/src/Processors/Transforms/FilterTransform.cpp b/dbms/src/Processors/Transforms/FilterTransform.cpp index 725b5ceb01b..8d1fcfa0168 100644 --- a/dbms/src/Processors/Transforms/FilterTransform.cpp +++ b/dbms/src/Processors/Transforms/FilterTransform.cpp @@ -42,14 +42,14 @@ static Block transformHeader( } FilterTransform::FilterTransform( - const Block & header, + const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_, - bool remove_filter_column) - : ISimpleTransform(header, transformHeader(header, expression_, filter_column_name_, remove_filter_column), true) + bool remove_filter_column_) + : ISimpleTransform(header_, transformHeader(header_, expression_, filter_column_name_, remove_filter_column_), true) , expression(std::move(expression_)) , filter_column_name(std::move(filter_column_name_)) - , remove_filter_column(remove_filter_column) + , remove_filter_column(remove_filter_column_) { transformed_header = getInputPort().getHeader(); expression->execute(transformed_header); diff --git a/dbms/src/Processors/Transforms/FilterTransform.h b/dbms/src/Processors/Transforms/FilterTransform.h index 32cdbb79d50..127eb5a8039 100644 --- a/dbms/src/Processors/Transforms/FilterTransform.h +++ b/dbms/src/Processors/Transforms/FilterTransform.h @@ -16,7 +16,7 @@ class 
FilterTransform : public ISimpleTransform { public: FilterTransform( - const Block & header, ExpressionActionsPtr expression, String filter_column_name, bool remove_filter_column); + const Block & header_, ExpressionActionsPtr expression_, String filter_column_name_, bool remove_filter_column_); String getName() const override { return "FilterTransform"; } diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp b/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp index 092327a0d8e..5eee08efcfc 100644 --- a/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp +++ b/dbms/src/Processors/Transforms/LimitsCheckingTransform.cpp @@ -38,9 +38,9 @@ void ProcessorProfileInfo::update(const Chunk & block) bytes += block.bytes(); } -LimitsCheckingTransform::LimitsCheckingTransform(const Block & header, LocalLimits limits) - : ISimpleTransform(header, header, false) - , limits(std::move(limits)) +LimitsCheckingTransform::LimitsCheckingTransform(const Block & header_, LocalLimits limits_) + : ISimpleTransform(header_, header_, false) + , limits(std::move(limits_)) { } diff --git a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h b/dbms/src/Processors/Transforms/LimitsCheckingTransform.h index a08e9ea9c67..53116446a75 100644 --- a/dbms/src/Processors/Transforms/LimitsCheckingTransform.h +++ b/dbms/src/Processors/Transforms/LimitsCheckingTransform.h @@ -30,7 +30,7 @@ public: using LimitsMode = IBlockInputStream::LimitsMode; /// LIMITS_CURRENT - LimitsCheckingTransform(const Block & header, LocalLimits limits); + LimitsCheckingTransform(const Block & header_, LocalLimits limits_); /// LIMITS_TOTAL /// LimitsCheckingTransform(const Block & header, LocalLimits limits, QueryStatus * process_list_elem); diff --git a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp index a573a6270e9..b9067e955f4 100644 --- 
a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.cpp @@ -15,10 +15,10 @@ struct ChunksToMerge : public ChunkInfo }; GroupingAggregatedTransform::GroupingAggregatedTransform( - const Block & header, size_t num_inputs, AggregatingTransformParamsPtr params) - : IProcessor(InputPorts(num_inputs, header), { Block() }) - , num_inputs(num_inputs) - , params(std::move(params)) + const Block & header_, size_t num_inputs_, AggregatingTransformParamsPtr params_) + : IProcessor(InputPorts(num_inputs_, header_), { Block() }) + , num_inputs(num_inputs_) + , params(std::move(params_)) , last_bucket_number(num_inputs, -1) , read_from_input(num_inputs, false) { @@ -285,8 +285,8 @@ void GroupingAggregatedTransform::work() } -MergingAggregatedBucketTransform::MergingAggregatedBucketTransform(AggregatingTransformParamsPtr params) - : ISimpleTransform({}, params->getHeader(), false), params(std::move(params)) +MergingAggregatedBucketTransform::MergingAggregatedBucketTransform(AggregatingTransformParamsPtr params_) + : ISimpleTransform({}, params_->getHeader(), false), params(std::move(params_)) { setInputNotNeededAfterRead(true); } @@ -333,10 +333,10 @@ void MergingAggregatedBucketTransform::transform(Chunk & chunk) } -SortingAggregatedTransform::SortingAggregatedTransform(size_t num_inputs, AggregatingTransformParamsPtr params) - : IProcessor(InputPorts(num_inputs, params->getHeader()), {params->getHeader()}) - , num_inputs(num_inputs) - , params(std::move(params)) +SortingAggregatedTransform::SortingAggregatedTransform(size_t num_inputs_, AggregatingTransformParamsPtr params_) + : IProcessor(InputPorts(num_inputs_, params->getHeader()), {params_->getHeader()}) + , num_inputs(num_inputs_) + , params(std::move(params_)) , last_bucket_number(num_inputs, -1) , is_input_finished(num_inputs, false) { diff --git 
a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h index eff71e954a9..0c5986c2156 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h +++ b/dbms/src/Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h @@ -14,7 +14,7 @@ namespace DB class GroupingAggregatedTransform : public IProcessor { public: - GroupingAggregatedTransform(const Block & header, size_t num_inputs, AggregatingTransformParamsPtr params); + GroupingAggregatedTransform(const Block & header_, size_t num_inputs_, AggregatingTransformParamsPtr params_); String getName() const override { return "GroupingAggregatedTransform"; } /// Special setting: in case if single source can return several chunks with same bucket. diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp b/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp index 32b833044cd..48fda8d6b8c 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingAggregatedTransform.cpp @@ -5,9 +5,9 @@ namespace DB { MergingAggregatedTransform::MergingAggregatedTransform( - Block header, AggregatingTransformParamsPtr params, size_t max_threads) - : IAccumulatingTransform(std::move(header), params->getHeader()) - , params(std::move(params)), max_threads(max_threads) + Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_) + : IAccumulatingTransform(std::move(header_), params_->getHeader()) + , params(std::move(params_)), max_threads(max_threads_) { } diff --git a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h b/dbms/src/Processors/Transforms/MergingAggregatedTransform.h index a8c52f2b047..cb1ce01976c 100644 --- a/dbms/src/Processors/Transforms/MergingAggregatedTransform.h +++ b/dbms/src/Processors/Transforms/MergingAggregatedTransform.h @@ -12,7 +12,7 @@ namespace DB class 
MergingAggregatedTransform : public IAccumulatingTransform { public: - MergingAggregatedTransform(Block header, AggregatingTransformParamsPtr params, size_t max_threads); + MergingAggregatedTransform(Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_); String getName() const override { return "MergingAggregatedTransform"; } protected: diff --git a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp b/dbms/src/Processors/Transforms/MergingSortedTransform.cpp index b0283b0a56e..8857ec876d7 100644 --- a/dbms/src/Processors/Transforms/MergingSortedTransform.cpp +++ b/dbms/src/Processors/Transforms/MergingSortedTransform.cpp @@ -9,13 +9,13 @@ MergingSortedTransform::MergingSortedTransform( const Block & header, size_t num_inputs, const SortDescription & description_, - size_t max_block_size, - UInt64 limit, - bool quiet, - bool have_all_inputs) + size_t max_block_size_, + UInt64 limit_, + bool quiet_, + bool have_all_inputs_) : IProcessor(InputPorts(num_inputs, header), {header}) - , description(description_), max_block_size(max_block_size), limit(limit), quiet(quiet) - , have_all_inputs(have_all_inputs) + , description(description_), max_block_size(max_block_size_), limit(limit_), quiet(quiet_) + , have_all_inputs(have_all_inputs_) , merged_data(header), source_chunks(num_inputs), cursors(num_inputs) { auto & sample = outputs.front().getHeader(); diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp b/dbms/src/Processors/Transforms/PartialSortingTransform.cpp index 0f15c34c7ff..7e217ea0a07 100644 --- a/dbms/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/dbms/src/Processors/Transforms/PartialSortingTransform.cpp @@ -5,9 +5,9 @@ namespace DB { PartialSortingTransform::PartialSortingTransform( - const Block & header, SortDescription & description, UInt64 limit, bool do_count_rows) - : ISimpleTransform(header, header, false) - , description(description), limit(limit), do_count_rows(do_count_rows) + const Block & 
header_, SortDescription & description_, UInt64 limit_, bool do_count_rows_) + : ISimpleTransform(header_, header_, false) + , description(description_), limit(limit_), do_count_rows(do_count_rows_) { } diff --git a/dbms/src/Processors/Transforms/PartialSortingTransform.h b/dbms/src/Processors/Transforms/PartialSortingTransform.h index 645b4ebab07..905b294c0be 100644 --- a/dbms/src/Processors/Transforms/PartialSortingTransform.h +++ b/dbms/src/Processors/Transforms/PartialSortingTransform.h @@ -14,10 +14,10 @@ public: /// limit - if not 0, then you can sort each block not completely, but only `limit` first rows by order. /// When count_rows is false, getNumReadRows() will always return 0. PartialSortingTransform( - const Block & header, - SortDescription & description, - UInt64 limit = 0, - bool do_count_rows = true); + const Block & header_, + SortDescription & description_, + UInt64 limit_ = 0, + bool do_count_rows_ = true); String getName() const override { return "PartialSortingTransform"; } diff --git a/dbms/src/Processors/tests/processors_test.cpp b/dbms/src/Processors/tests/processors_test.cpp index b663cf319ad..519eb79e017 100644 --- a/dbms/src/Processors/tests/processors_test.cpp +++ b/dbms/src/Processors/tests/processors_test.cpp @@ -31,9 +31,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), sleep_useconds(sleep_useconds) + current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -61,9 +61,9 @@ protected: public: String getName() const override { return "SleepyNumbers"; } - SleepyNumbersSource(UInt64 start_number, unsigned sleep_useconds) + SleepyNumbersSource(UInt64 start_number, unsigned sleep_useconds_) : IProcessor({}, 
{Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})}) - , output(outputs.front()), current_number(start_number), sleep_useconds(sleep_useconds) + , output(outputs.front()), current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -122,9 +122,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_aggregation.cpp b/dbms/src/Processors/tests/processors_test_aggregation.cpp index 116518391d6..a645804eba8 100644 --- a/dbms/src/Processors/tests/processors_test_aggregation.cpp +++ b/dbms/src/Processors/tests/processors_test_aggregation.cpp @@ -41,9 +41,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), step(step), block_size(block_size), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } @@ -72,9 +72,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix, Block header) + PrintSink(String prefix_, Block header) : ISink(std::move(header)), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_chain.cpp b/dbms/src/Processors/tests/processors_test_chain.cpp index dfcd2c6b5ee..b6a4f0ad653 100644 --- 
a/dbms/src/Processors/tests/processors_test_chain.cpp +++ b/dbms/src/Processors/tests/processors_test_chain.cpp @@ -28,9 +28,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), sleep_useconds(sleep_useconds) + current_number(start_number), sleep_useconds(sleep_useconds_) { } @@ -52,12 +52,12 @@ private: class SleepyTransform : public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), /*skip_empty_chunks =*/ false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -76,9 +76,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp b/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp index fa977dc7ba8..1d03d75c55d 100644 --- a/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp +++ b/dbms/src/Processors/tests/processors_test_expand_pipeline.cpp @@ -26,9 +26,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ 
ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } @@ -64,9 +64,9 @@ class OneNumberSource : public ISource public: String getName() const override { return "OneNumber"; } - OneNumberSource(UInt64 number) + OneNumberSource(UInt64 number_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - number(number) + number(number_) { } diff --git a/dbms/src/Processors/tests/processors_test_merge.cpp b/dbms/src/Processors/tests/processors_test_merge.cpp index 00e322430e5..3842286bc59 100644 --- a/dbms/src/Processors/tests/processors_test_merge.cpp +++ b/dbms/src/Processors/tests/processors_test_merge.cpp @@ -161,9 +161,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), step(step), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), sleep_useconds(sleep_useconds_) { } @@ -187,12 +187,12 @@ private: class SleepyTransform : public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -211,9 +211,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ 
ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp b/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp index 258e89e67e7..a5059011e9b 100644 --- a/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp +++ b/dbms/src/Processors/tests/processors_test_merge_sorting_transform.cpp @@ -31,9 +31,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 count, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 count_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - count(count), block_size(block_size), sleep_useconds(sleep_useconds) + count(count_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } diff --git a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp b/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp index 214044dfd31..af27973e3fd 100644 --- a/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp +++ b/dbms/src/Processors/tests/processors_test_merging_sorted_transform.cpp @@ -29,9 +29,9 @@ class NumbersSource : public ISource public: String getName() const override { return "Numbers"; } - NumbersSource(UInt64 start_number, UInt64 step, UInt64 block_size, unsigned sleep_useconds) + NumbersSource(UInt64 start_number, UInt64 step_, UInt64 block_size_, unsigned sleep_useconds_) : ISource(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - current_number(start_number), step(step), block_size(block_size), sleep_useconds(sleep_useconds) + current_number(start_number), step(step_), block_size(block_size_), sleep_useconds(sleep_useconds_) { } @@ -59,12 +59,12 @@ private: class SleepyTransform : 
public ISimpleTransform { public: - explicit SleepyTransform(unsigned sleep_useconds) + explicit SleepyTransform(unsigned sleep_useconds_) : ISimpleTransform( Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }}), false) - , sleep_useconds(sleep_useconds) {} + , sleep_useconds(sleep_useconds_) {} String getName() const override { return "SleepyTransform"; } @@ -83,9 +83,9 @@ class PrintSink : public ISink public: String getName() const override { return "Print"; } - PrintSink(String prefix) + PrintSink(String prefix_) : ISink(Block({ColumnWithTypeAndName{ ColumnUInt64::create(), std::make_shared(), "number" }})), - prefix(std::move(prefix)) + prefix(std::move(prefix_)) { } diff --git a/dbms/src/Storages/AlterCommands.cpp b/dbms/src/Storages/AlterCommands.cpp index 114d1a8d62b..742a6a2d266 100644 --- a/dbms/src/Storages/AlterCommands.cpp +++ b/dbms/src/Storages/AlterCommands.cpp @@ -64,7 +64,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.codec = compression_codec_factory.get(ast_col_decl.codec, command.data_type); if (command_ast->column) - command.after_column = *getIdentifierName(command_ast->column); + command.after_column = getIdentifierName(command_ast->column); if (ast_col_decl.ttl) command.ttl = ast_col_decl.ttl; @@ -80,7 +80,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ AlterCommand command; command.type = AlterCommand::DROP_COLUMN; - command.column_name = *getIdentifierName(command_ast->column); + command.column_name = getIdentifierName(command_ast->column); command.if_exists = command_ast->if_exists; return command; } @@ -123,7 +123,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ { AlterCommand command; command.type = COMMENT_COLUMN; - command.column_name = *getIdentifierName(command_ast->column); + command.column_name = 
getIdentifierName(command_ast->column); const auto & ast_comment = command_ast->comment->as(); command.comment = ast_comment.value.get(); command.if_exists = command_ast->if_exists; diff --git a/dbms/src/Storages/AlterCommands.h b/dbms/src/Storages/AlterCommands.h index 4905b80f92f..130e11208cd 100644 --- a/dbms/src/Storages/AlterCommands.h +++ b/dbms/src/Storages/AlterCommands.h @@ -72,13 +72,13 @@ struct AlterCommand CompressionCodecPtr codec; AlterCommand() = default; - AlterCommand(const Type type, const String & column_name, const DataTypePtr & data_type, - const ColumnDefaultKind default_kind, const ASTPtr & default_expression, - const String & after_column, const String & comment, - const bool if_exists, const bool if_not_exists) - : type{type}, column_name{column_name}, data_type{data_type}, default_kind{default_kind}, - default_expression{default_expression}, comment(comment), after_column{after_column}, - if_exists(if_exists), if_not_exists(if_not_exists) + AlterCommand(const Type type_, const String & column_name_, const DataTypePtr & data_type_, + const ColumnDefaultKind default_kind_, const ASTPtr & default_expression_, + const String & after_column_, const String & comment_, + const bool if_exists_, const bool if_not_exists_) + : type{type_}, column_name{column_name_}, data_type{data_type_}, default_kind{default_kind_}, + default_expression{default_expression_}, comment(comment_), after_column{after_column_}, + if_exists(if_exists_), if_not_exists(if_not_exists_) {} static std::optional parse(const ASTAlterCommand * command); diff --git a/dbms/src/Storages/ColumnsDescription.cpp b/dbms/src/Storages/ColumnsDescription.cpp index 2dbe308ea57..b3caeaa767b 100644 --- a/dbms/src/Storages/ColumnsDescription.cpp +++ b/dbms/src/Storages/ColumnsDescription.cpp @@ -91,7 +91,7 @@ void ColumnDescription::writeText(WriteBuffer & buf) const void ColumnDescription::readText(ReadBuffer & buf) { - ParserColumnDeclaration column_parser(true); + ParserColumnDeclaration 
column_parser(/* require type */ true); String column_line; readEscapedStringUntilEOL(column_line, buf); ASTPtr ast = parseQuery(column_parser, column_line, "column parser", 0); diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp index ff780f6f7b7..7eefc68f3a8 100644 --- a/dbms/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/dbms/src/Storages/Distributed/DirectoryMonitor.cpp @@ -60,13 +60,13 @@ namespace StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor( - StorageDistributed & storage, const std::string & name, const ConnectionPoolPtr & pool, ActionBlocker & monitor_blocker) - : storage(storage), pool{pool}, path{storage.path + name + '/'} + StorageDistributed & storage_, const std::string & name_, const ConnectionPoolPtr & pool_, ActionBlocker & monitor_blocker_) + : storage(storage_), pool{pool_}, path{storage.path + name_ + '/'} , current_batch_file_path{path + "current_batch.txt"} , default_sleep_time{storage.global_context.getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()} , sleep_time{default_sleep_time} , log{&Logger::get(getLoggerName())} - , monitor_blocker(monitor_blocker) + , monitor_blocker(monitor_blocker_) { const Settings & settings = storage.global_context.getSettingsRef(); should_batch_inserts = settings.distributed_directory_monitor_batch_inserts; diff --git a/dbms/src/Storages/Distributed/DirectoryMonitor.h b/dbms/src/Storages/Distributed/DirectoryMonitor.h index 9416db9be2c..f185d64b66f 100644 --- a/dbms/src/Storages/Distributed/DirectoryMonitor.h +++ b/dbms/src/Storages/Distributed/DirectoryMonitor.h @@ -20,7 +20,7 @@ class StorageDistributedDirectoryMonitor { public: StorageDistributedDirectoryMonitor( - StorageDistributed & storage, const std::string & name, const ConnectionPoolPtr & pool, ActionBlocker & monitor_blocker); + StorageDistributed & storage_, const std::string & name_, const ConnectionPoolPtr & pool_, 
ActionBlocker & monitor_blocker_); ~StorageDistributedDirectoryMonitor(); diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp index da374b1b65d..b1b63258f06 100644 --- a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -59,9 +59,9 @@ namespace ErrorCodes DistributedBlockOutputStream::DistributedBlockOutputStream( - const Context & context_, StorageDistributed & storage, const ASTPtr & query_ast, const ClusterPtr & cluster_, + const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_, bool insert_sync_, UInt64 insert_timeout_) - : context(context_), storage(storage), query_ast(query_ast), query_string(queryToString(query_ast)), + : context(context_), storage(storage_), query_ast(query_ast_), query_string(queryToString(query_ast_)), cluster(cluster_), insert_sync(insert_sync_), insert_timeout(insert_timeout_), log(&Logger::get("DistributedBlockOutputStream")) { @@ -565,7 +565,8 @@ void DistributedBlockOutputStream::writeToShard(const Block & block, const std:: } if (link(first_file_tmp_path.data(), block_file_path.data())) - throwFromErrno("Could not link " + block_file_path + " to " + first_file_tmp_path, ErrorCodes::CANNOT_LINK); + throwFromErrnoWithPath("Could not link " + block_file_path + " to " + first_file_tmp_path, block_file_path, + ErrorCodes::CANNOT_LINK); } /** remove the temporary file, enabling the OS to reclaim inode after all threads diff --git a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h index f71585b8026..0d5a2e08b11 100644 --- a/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h +++ b/dbms/src/Storages/Distributed/DistributedBlockOutputStream.h @@ -35,7 +35,7 @@ class StorageDistributed; class DistributedBlockOutputStream : public 
IBlockOutputStream { public: - DistributedBlockOutputStream(const Context & context_, StorageDistributed & storage, const ASTPtr & query_ast, + DistributedBlockOutputStream(const Context & context_, StorageDistributed & storage_, const ASTPtr & query_ast_, const ClusterPtr & cluster_, bool insert_sync_, UInt64 insert_timeout_); Block getHeader() const override; @@ -98,8 +98,8 @@ private: struct JobReplica { JobReplica() = default; - JobReplica(size_t shard_index, size_t replica_index, bool is_local_job, const Block & sample_block) - : shard_index(shard_index), replica_index(replica_index), is_local_job(is_local_job), current_shard_block(sample_block.cloneEmpty()) {} + JobReplica(size_t shard_index_, size_t replica_index_, bool is_local_job_, const Block & sample_block) + : shard_index(shard_index_), replica_index(replica_index_), is_local_job(is_local_job_), current_shard_block(sample_block.cloneEmpty()) {} size_t shard_index = 0; size_t replica_index = 0; diff --git a/dbms/src/Storages/IStorage.cpp b/dbms/src/Storages/IStorage.cpp index 07b2b94c191..bd10796e28a 100644 --- a/dbms/src/Storages/IStorage.cpp +++ b/dbms/src/Storages/IStorage.cpp @@ -156,9 +156,12 @@ namespace } } -void IStorage::check(const Names & column_names) const +void IStorage::check(const Names & column_names, bool include_virtuals) const { - const NamesAndTypesList & available_columns = getColumns().getAllPhysical(); + NamesAndTypesList available_columns = getColumns().getAllPhysical(); + if (include_virtuals) + available_columns.splice(available_columns.end(), getColumns().getVirtuals()); + const String list_of_columns = listOfColumns(available_columns); if (column_names.empty()) diff --git a/dbms/src/Storages/IStorage.h b/dbms/src/Storages/IStorage.h index 4ab866b307a..88b1e3662ca 100644 --- a/dbms/src/Storages/IStorage.h +++ b/dbms/src/Storages/IStorage.h @@ -118,7 +118,7 @@ public: /// thread-unsafe part. 
lockStructure must be acquired /// Verify that all the requested names are in the table and are set correctly: /// list of names is not empty and the names do not repeat. - void check(const Names & column_names) const; + void check(const Names & column_names, bool include_virtuals = false) const; /// Check that all the requested names are in the table and have the correct types. void check(const NamesAndTypesList & columns) const; diff --git a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp index 19b496e0e60..624936c1626 100644 --- a/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp +++ b/dbms/src/Storages/Kafka/KafkaBlockInputStream.cpp @@ -51,9 +51,9 @@ void KafkaBlockInputStream::readPrefixImpl() buffer->subBufferAs()->subscribe(storage.getTopics()); - const auto & limits = getLimits(); + const auto & limits_ = getLimits(); const size_t poll_timeout = buffer->subBufferAs()->pollTimeout(); - size_t rows_portion_size = poll_timeout ? std::min(max_block_size, limits.max_execution_time.totalMilliseconds() / poll_timeout) : max_block_size; + size_t rows_portion_size = poll_timeout ? 
std::min(max_block_size, limits_.max_execution_time.totalMilliseconds() / poll_timeout) : max_block_size; rows_portion_size = std::max(rows_portion_size, 1ul); auto non_virtual_header = storage.getSampleBlockNonMaterialized(); /// FIXME: add materialized columns support @@ -67,7 +67,7 @@ void KafkaBlockInputStream::readPrefixImpl() auto child = FormatFactory::instance().getInput( storage.getFormatName(), *buffer, non_virtual_header, context, max_block_size, rows_portion_size, read_callback); - child->setLimits(limits); + child->setLimits(limits_); addChild(child); broken = true; diff --git a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp index db3de302dd8..01fd09db7e3 100644 --- a/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp +++ b/dbms/src/Storages/Kafka/ReadBufferFromKafkaConsumer.cpp @@ -56,32 +56,42 @@ void ReadBufferFromKafkaConsumer::commit() void ReadBufferFromKafkaConsumer::subscribe(const Names & topics) { { - String message = "Subscribed to topics:"; + String message = "Already subscribed to topics:"; for (const auto & topic : consumer->get_subscription()) message += " " + topic; LOG_TRACE(log, message); } { - String message = "Assigned to topics:"; + String message = "Already assigned to topics:"; for (const auto & toppar : consumer->get_assignment()) message += " " + toppar.get_topic(); LOG_TRACE(log, message); } - consumer->resume(); - // While we wait for an assignment after subscribtion, we'll poll zero messages anyway. // If we're doing a manual select then it's better to get something after a wait, then immediate nothing. 
- if (consumer->get_subscription().empty()) + // But due to the nature of async pause/resume/subscribe we can't guarantee any persistent state: + // see https://github.com/edenhill/librdkafka/issues/2455 + while (consumer->get_subscription().empty()) { - consumer->pause(); // don't accidentally read any messages - consumer->subscribe(topics); - consumer->poll(5s); - consumer->resume(); + stalled = false; - // FIXME: if we failed to receive "subscribe" response while polling and destroy consumer now, then we may hang up. - // see https://github.com/edenhill/librdkafka/issues/2077 + try + { + consumer->subscribe(topics); + if (nextImpl()) + break; + + // FIXME: if we failed to receive "subscribe" response while polling and destroy consumer now, then we may hang up. + // see https://github.com/edenhill/librdkafka/issues/2077 + } + catch (cppkafka::HandleException & e) + { + if (e.get_error() == RD_KAFKA_RESP_ERR__TIMED_OUT) + continue; + throw; + } } stalled = false; diff --git a/dbms/src/Storages/MarkCache.h b/dbms/src/Storages/MarkCache.h index 3ddef1b3b46..9ce04c01e43 100644 --- a/dbms/src/Storages/MarkCache.h +++ b/dbms/src/Storages/MarkCache.h @@ -38,8 +38,8 @@ private: using Base = LRUCache; public: - MarkCache(size_t max_size_in_bytes, const Delay & expiration_delay) - : Base(max_size_in_bytes, expiration_delay) {} + MarkCache(size_t max_size_in_bytes, const Delay & expiration_delay_) + : Base(max_size_in_bytes, expiration_delay_) {} /// Calculate key from path to file and offset. 
static UInt128 hash(const String & path_to_file) diff --git a/dbms/src/Storages/MergeTree/DiskSpaceMonitor.h b/dbms/src/Storages/MergeTree/DiskSpaceMonitor.h index d518fea5490..bd95d10e834 100644 --- a/dbms/src/Storages/MergeTree/DiskSpaceMonitor.h +++ b/dbms/src/Storages/MergeTree/DiskSpaceMonitor.h @@ -2,7 +2,15 @@ #include #include +#include +#include +#include +#if defined(__linux__) +#include +#include +#endif #include +#include #include #include #include @@ -23,6 +31,7 @@ namespace ErrorCodes { extern const int CANNOT_STATVFS; extern const int NOT_ENOUGH_SPACE; + extern const int SYSTEM_ERROR; } @@ -96,12 +105,18 @@ public: using ReservationPtr = std::unique_ptr; - static UInt64 getUnreservedFreeSpace(const std::string & path) + inline static struct statvfs getStatVFS(const std::string & path) { struct statvfs fs; - if (statvfs(path.c_str(), &fs) != 0) - throwFromErrno("Could not calculate available disk space (statvfs)", ErrorCodes::CANNOT_STATVFS); + throwFromErrnoWithPath("Could not calculate available disk space (statvfs)", path, + ErrorCodes::CANNOT_STATVFS); + return fs; + } + + static UInt64 getUnreservedFreeSpace(const std::string & path) + { + struct statvfs fs = getStatVFS(path); UInt64 res = fs.f_bfree * fs.f_bsize; @@ -140,6 +155,62 @@ public: return std::make_unique(size); } + /// Returns mount point of filesystem where absolute_path (must exist) is located + static std::filesystem::path getMountPoint(std::filesystem::path absolute_path) + { + if (absolute_path.is_relative()) + throw Exception("Path is relative. 
It's a bug.", ErrorCodes::LOGICAL_ERROR); + + absolute_path = std::filesystem::canonical(absolute_path); + + const auto get_device_id = [](const std::filesystem::path & p) + { + struct stat st; + if (stat(p.c_str(), &st)) + throwFromErrnoWithPath("Cannot stat " + p.string(), p.string(), ErrorCodes::SYSTEM_ERROR); + return st.st_dev; + }; + + /// If /some/path/to/dir/ and /some/path/to/ have different device id, + /// then device which contains /some/path/to/dir/filename is mounted to /some/path/to/dir/ + auto device_id = get_device_id(absolute_path); + while (absolute_path.has_relative_path()) + { + auto parent = absolute_path.parent_path(); + auto parent_device_id = get_device_id(parent); + if (device_id != parent_device_id) + return absolute_path; + absolute_path = parent; + device_id = parent_device_id; + } + + return absolute_path; + } + + /// Returns name of filesystem mounted to mount_point +#if !defined(__linux__) +[[noreturn]] +#endif + static std::string getFilesystemName([[maybe_unused]] const std::string & mount_point) + { +#if defined(__linux__) + auto mounted_filesystems = setmntent("/etc/mtab", "r"); + if (!mounted_filesystems) + throw DB::Exception("Cannot open /etc/mtab to get name of filesystem", ErrorCodes::SYSTEM_ERROR); + mntent fs_info; + constexpr size_t buf_size = 4096; /// The same as buffer used for getmntent in glibc. 
It can happen that it's not enough + char buf[buf_size]; + while (getmntent_r(mounted_filesystems, &fs_info, buf, buf_size) && fs_info.mnt_dir != mount_point) + ; + endmntent(mounted_filesystems); + if (fs_info.mnt_dir != mount_point) + throw DB::Exception("Cannot find name of filesystem by mount point " + mount_point, ErrorCodes::SYSTEM_ERROR); + return fs_info.mnt_fsname; +#else + throw DB::Exception("Supported on linux only", ErrorCodes::NOT_IMPLEMENTED); +#endif + } + private: static UInt64 reserved_bytes; static UInt64 reservation_count; diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp index 8321d7dc8b2..4109a5511af 100644 --- a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.cpp @@ -414,20 +414,20 @@ void IMergedBlockOutputStream::finishSkipIndicesSerialization( IMergedBlockOutputStream::ColumnStream::ColumnStream( const String & escaped_column_name_, - const String & data_path, + const String & data_path_, const std::string & data_file_extension_, - const std::string & marks_path, + const std::string & marks_path_, const std::string & marks_file_extension_, - const CompressionCodecPtr & compression_codec, - size_t max_compress_block_size, - size_t estimated_size, - size_t aio_threshold) : + const CompressionCodecPtr & compression_codec_, + size_t max_compress_block_size_, + size_t estimated_size_, + size_t aio_threshold_) : escaped_column_name(escaped_column_name_), data_file_extension{data_file_extension_}, marks_file_extension{marks_file_extension_}, - plain_file(createWriteBufferFromFileBase(data_path + data_file_extension, estimated_size, aio_threshold, max_compress_block_size)), - plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_codec), compressed(compressed_buf), - marks_file(marks_path + marks_file_extension, 4096, O_TRUNC | O_CREAT | O_WRONLY), marks(marks_file) + 
plain_file(createWriteBufferFromFileBase(data_path_ + data_file_extension, estimated_size_, aio_threshold_, max_compress_block_size_)), + plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_codec_), compressed(compressed_buf), + marks_file(marks_path_ + marks_file_extension, 4096, O_TRUNC | O_CREAT | O_WRONLY), marks(marks_file) { } diff --git a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h index f1fbb058436..cbf78c1a2ea 100644 --- a/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -35,14 +35,14 @@ protected: { ColumnStream( const String & escaped_column_name_, - const String & data_path, + const String & data_path_, const std::string & data_file_extension_, - const std::string & marks_path, + const std::string & marks_path_, const std::string & marks_file_extension_, - const CompressionCodecPtr & compression_codec, - size_t max_compress_block_size, - size_t estimated_size, - size_t aio_threshold); + const CompressionCodecPtr & compression_codec_, + size_t max_compress_block_size_, + size_t estimated_size_, + size_t aio_threshold_); String escaped_column_name; std::string data_file_extension; diff --git a/dbms/src/Storages/MergeTree/LevelMergeSelector.h b/dbms/src/Storages/MergeTree/LevelMergeSelector.h index fa35500dcae..4ce6624bea1 100644 --- a/dbms/src/Storages/MergeTree/LevelMergeSelector.h +++ b/dbms/src/Storages/MergeTree/LevelMergeSelector.h @@ -17,7 +17,7 @@ public: size_t parts_to_merge = 10; }; - explicit LevelMergeSelector(const Settings & settings) : settings(settings) {} + explicit LevelMergeSelector(const Settings & settings_) : settings(settings_) {} PartsInPartition select( const Partitions & partitions, diff --git a/dbms/src/Storages/MergeTree/MarkRange.h b/dbms/src/Storages/MergeTree/MarkRange.h index 8be0305b807..657ffe32f78 100644 --- a/dbms/src/Storages/MergeTree/MarkRange.h +++ 
b/dbms/src/Storages/MergeTree/MarkRange.h @@ -16,7 +16,7 @@ struct MarkRange size_t end; MarkRange() = default; - MarkRange(const size_t begin, const size_t end) : begin{begin}, end{end} {} + MarkRange(const size_t begin_, const size_t end_) : begin{begin_}, end{end_} {} }; using MarkRanges = std::vector; diff --git a/dbms/src/Storages/MergeTree/MergeList.cpp b/dbms/src/Storages/MergeTree/MergeList.cpp index 7d4d7f92496..3e4537ad45c 100644 --- a/dbms/src/Storages/MergeTree/MergeList.cpp +++ b/dbms/src/Storages/MergeTree/MergeList.cpp @@ -14,8 +14,8 @@ namespace CurrentMetrics namespace DB { -MergeListElement::MergeListElement(const std::string & database, const std::string & table, const FutureMergedMutatedPart & future_part) - : database{database}, table{table}, partition_id{future_part.part_info.partition_id} +MergeListElement::MergeListElement(const std::string & database_, const std::string & table_, const FutureMergedMutatedPart & future_part) + : database{database_}, table{table_}, partition_id{future_part.part_info.partition_id} , result_part_name{future_part.name} , result_data_version{future_part.part_info.getDataVersion()} , num_parts{future_part.parts.size()} diff --git a/dbms/src/Storages/MergeTree/MergeList.h b/dbms/src/Storages/MergeTree/MergeList.h index dc2d1c80682..0a25277a6ed 100644 --- a/dbms/src/Storages/MergeTree/MergeList.h +++ b/dbms/src/Storages/MergeTree/MergeList.h @@ -110,7 +110,7 @@ public: MergeListEntry(const MergeListEntry &) = delete; MergeListEntry & operator=(const MergeListEntry &) = delete; - MergeListEntry(MergeList & list, const container_t::iterator it) : list(list), it{it} {} + MergeListEntry(MergeList & list_, const container_t::iterator it_) : list(list_), it{it_} {} ~MergeListEntry(); MergeListElement * operator->() { return &*it; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp index 10d7e3750e4..0489182fe55 100644 --- 
a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.cpp @@ -20,27 +20,27 @@ namespace ErrorCodes MergeTreeBaseSelectBlockInputStream::MergeTreeBaseSelectBlockInputStream( - const MergeTreeData & storage, - const PrewhereInfoPtr & prewhere_info, - UInt64 max_block_size_rows, - UInt64 preferred_block_size_bytes, - UInt64 preferred_max_column_in_block_size_bytes, - UInt64 min_bytes_to_use_direct_io, - UInt64 max_read_buffer_size, - bool use_uncompressed_cache, - bool save_marks_in_cache, - const Names & virt_column_names) + const MergeTreeData & storage_, + const PrewhereInfoPtr & prewhere_info_, + UInt64 max_block_size_rows_, + UInt64 preferred_block_size_bytes_, + UInt64 preferred_max_column_in_block_size_bytes_, + UInt64 min_bytes_to_use_direct_io_, + UInt64 max_read_buffer_size_, + bool use_uncompressed_cache_, + bool save_marks_in_cache_, + const Names & virt_column_names_) : - storage(storage), - prewhere_info(prewhere_info), - max_block_size_rows(max_block_size_rows), - preferred_block_size_bytes(preferred_block_size_bytes), - preferred_max_column_in_block_size_bytes(preferred_max_column_in_block_size_bytes), - min_bytes_to_use_direct_io(min_bytes_to_use_direct_io), - max_read_buffer_size(max_read_buffer_size), - use_uncompressed_cache(use_uncompressed_cache), - save_marks_in_cache(save_marks_in_cache), - virt_column_names(virt_column_names) + storage(storage_), + prewhere_info(prewhere_info_), + max_block_size_rows(max_block_size_rows_), + preferred_block_size_bytes(preferred_block_size_bytes_), + preferred_max_column_in_block_size_bytes(preferred_max_column_in_block_size_bytes_), + min_bytes_to_use_direct_io(min_bytes_to_use_direct_io_), + max_read_buffer_size(max_read_buffer_size_), + use_uncompressed_cache(use_uncompressed_cache_), + save_marks_in_cache(save_marks_in_cache_), + virt_column_names(virt_column_names_) { } diff --git 
a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h index a7e37f68f0c..640f73652e4 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBaseSelectBlockInputStream.h @@ -18,16 +18,16 @@ class MergeTreeBaseSelectBlockInputStream : public IBlockInputStream { public: MergeTreeBaseSelectBlockInputStream( - const MergeTreeData & storage, - const PrewhereInfoPtr & prewhere_info, - UInt64 max_block_size_rows, - UInt64 preferred_block_size_bytes, - UInt64 preferred_max_column_in_block_size_bytes, - UInt64 min_bytes_to_use_direct_io, - UInt64 max_read_buffer_size, - bool use_uncompressed_cache, - bool save_marks_in_cache = true, - const Names & virt_column_names = {}); + const MergeTreeData & storage_, + const PrewhereInfoPtr & prewhere_info_, + UInt64 max_block_size_rows_, + UInt64 preferred_block_size_bytes_, + UInt64 preferred_max_column_in_block_size_bytes_, + UInt64 min_bytes_to_use_direct_io_, + UInt64 max_read_buffer_size_, + bool use_uncompressed_cache_, + bool save_marks_in_cache_ = true, + const Names & virt_column_names_ = {}); ~MergeTreeBaseSelectBlockInputStream() override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h b/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h index ace24b474f1..8f957d631d3 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockOutputStream.h @@ -13,8 +13,8 @@ class StorageMergeTree; class MergeTreeBlockOutputStream : public IBlockOutputStream { public: - MergeTreeBlockOutputStream(StorageMergeTree & storage_, size_t max_parts_per_block) - : storage(storage_), max_parts_per_block(max_parts_per_block) {} + MergeTreeBlockOutputStream(StorageMergeTree & storage_, size_t max_parts_per_block_) + : storage(storage_), max_parts_per_block(max_parts_per_block_) {} Block getHeader() const override; void 
write(const Block & block) override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 96ece027694..7dc9a40e89a 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -66,13 +66,13 @@ NameSet injectRequiredColumns(const MergeTreeData & storage, const MergeTreeData MergeTreeReadTask::MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part, const MarkRanges & mark_ranges, const size_t part_index_in_query, - const Names & ordered_names, const NameSet & column_name_set, const NamesAndTypesList & columns, - const NamesAndTypesList & pre_columns, const bool remove_prewhere_column, const bool should_reorder, - MergeTreeBlockSizePredictorPtr && size_predictor) - : data_part{data_part}, mark_ranges{mark_ranges}, part_index_in_query{part_index_in_query}, - ordered_names{ordered_names}, column_name_set{column_name_set}, columns{columns}, pre_columns{pre_columns}, - remove_prewhere_column{remove_prewhere_column}, should_reorder{should_reorder}, size_predictor{std::move(size_predictor)} + const MergeTreeData::DataPartPtr & data_part_, const MarkRanges & mark_ranges_, const size_t part_index_in_query_, + const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_, + const NamesAndTypesList & pre_columns_, const bool remove_prewhere_column_, const bool should_reorder_, + MergeTreeBlockSizePredictorPtr && size_predictor_) + : data_part{data_part_}, mark_ranges{mark_ranges_}, part_index_in_query{part_index_in_query_}, + ordered_names{ordered_names_}, column_name_set{column_name_set_}, columns{columns_}, pre_columns{pre_columns_}, + remove_prewhere_column{remove_prewhere_column_}, should_reorder{should_reorder_}, size_predictor{std::move(size_predictor_)} {} MergeTreeReadTask::~MergeTreeReadTask() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h 
b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h index f0e24d96add..a031255b3ab 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h +++ b/dbms/src/Storages/MergeTree/MergeTreeBlockReadUtils.h @@ -56,10 +56,10 @@ struct MergeTreeReadTask bool isFinished() const { return mark_ranges.empty() && range_reader.isCurrentRangeFinished(); } MergeTreeReadTask( - const MergeTreeData::DataPartPtr & data_part, const MarkRanges & mark_ranges, const size_t part_index_in_query, - const Names & ordered_names, const NameSet & column_name_set, const NamesAndTypesList & columns, - const NamesAndTypesList & pre_columns, const bool remove_prewhere_column, const bool should_reorder, - MergeTreeBlockSizePredictorPtr && size_predictor); + const MergeTreeData::DataPartPtr & data_part_, const MarkRanges & mark_ranges_, const size_t part_index_in_query_, + const Names & ordered_names_, const NameSet & column_name_set_, const NamesAndTypesList & columns_, + const NamesAndTypesList & pre_columns_, const bool remove_prewhere_column_, const bool should_reorder_, + MergeTreeBlockSizePredictorPtr && size_predictor_); virtual ~MergeTreeReadTask(); }; diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp index cdd72a222bd..f64bdcc9740 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataPart.cpp @@ -413,7 +413,8 @@ void MergeTreeDataPart::remove() const { String path_to_remove = to + "/" + file; if (0 != unlink(path_to_remove.c_str())) - throwFromErrno("Cannot unlink file " + path_to_remove, ErrorCodes::CANNOT_UNLINK); + throwFromErrnoWithPath("Cannot unlink file " + path_to_remove, path_to_remove, + ErrorCodes::CANNOT_UNLINK); } #if !__clang__ #pragma GCC diagnostic pop @@ -423,11 +424,12 @@ void MergeTreeDataPart::remove() const { String path_to_remove = to + "/" + file; if (0 != unlink(path_to_remove.c_str())) - throwFromErrno("Cannot unlink file " + 
path_to_remove, ErrorCodes::CANNOT_UNLINK); + throwFromErrnoWithPath("Cannot unlink file " + path_to_remove, path_to_remove, + ErrorCodes::CANNOT_UNLINK); } if (0 != rmdir(to.c_str())) - throwFromErrno("Cannot rmdir file " + to, ErrorCodes::CANNOT_UNLINK); + throwFromErrnoWithPath("Cannot rmdir file " + to, to, ErrorCodes::CANNOT_UNLINK); } catch (...) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataPart.h b/dbms/src/Storages/MergeTree/MergeTreeDataPart.h index f41ea8af424..e46a64f0546 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataPart.h +++ b/dbms/src/Storages/MergeTree/MergeTreeDataPart.h @@ -153,7 +153,7 @@ struct MergeTreeDataPart struct StatesFilter { std::initializer_list affordable_states; - StatesFilter(const std::initializer_list & affordable_states) : affordable_states(affordable_states) {} + StatesFilter(const std::initializer_list & affordable_states_) : affordable_states(affordable_states_) {} bool operator() (const std::shared_ptr & part) const { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index b86da56649d..ba44053afa7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -40,8 +40,8 @@ bool MergeTreeIndexBloomFilter::mayBenefitFromIndexForIn(const ASTPtr & node) co { const String & column_name = node->getColumnName(); - for (const auto & name : columns) - if (column_name == name) + for (const auto & cname : columns) + if (column_name == cname) return true; if (const auto * func = typeid_cast(node.get())) diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 9c8a9d4b41c..f2892fc51a7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -76,11 +76,11 @@ bool 
maybeTrueOnBloomFilter(const IColumn * hash_column, const BloomFilterPtr & } MergeTreeIndexConditionBloomFilter::MergeTreeIndexConditionBloomFilter( - const SelectQueryInfo & info, const Context & context, const Block & header, size_t hash_functions) - : header(header), context(context), query_info(info), hash_functions(hash_functions) + const SelectQueryInfo & info_, const Context & context_, const Block & header_, size_t hash_functions_) + : header(header_), context(context_), query_info(info_), hash_functions(hash_functions_) { auto atomFromAST = [this](auto & node, auto &, auto & constants, auto & out) { return traverseAtomAST(node, constants, out); }; - rpn = std::move(RPNBuilder(info, context, atomFromAST).extractRPN()); + rpn = std::move(RPNBuilder(info_, context, atomFromAST).extractRPN()); } bool MergeTreeIndexConditionBloomFilter::alwaysUnknownOrTrue() const diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h index 6c268cadbb6..d3c62bbcaa7 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h @@ -37,7 +37,7 @@ public: std::vector> predicate; }; - MergeTreeIndexConditionBloomFilter(const SelectQueryInfo & info, const Context & context, const Block & header, size_t hash_functions); + MergeTreeIndexConditionBloomFilter(const SelectQueryInfo & info_, const Context & context_, const Block & header_, size_t hash_functions_); bool alwaysUnknownOrTrue() const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index be9994ece64..3625c6f1aa5 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -61,9 +61,9 @@ bool MergeTreeConditionFullText::createFunctionEqualsCondition(RPNElement & out, return true; } 
-MergeTreeIndexGranuleFullText::MergeTreeIndexGranuleFullText(const MergeTreeIndexFullText & index) +MergeTreeIndexGranuleFullText::MergeTreeIndexGranuleFullText(const MergeTreeIndexFullText & index_) : IMergeTreeIndexGranule() - , index(index) + , index(index_) , bloom_filters( index.columns.size(), BloomFilter(index.bloom_filter_size, index.bloom_filter_hashes, index.seed)) , has_elems(false) {} @@ -87,8 +87,8 @@ void MergeTreeIndexGranuleFullText::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorFullText::MergeTreeIndexAggregatorFullText(const MergeTreeIndexFullText & index) - : index(index), granule(std::make_shared(index)) {} +MergeTreeIndexAggregatorFullText::MergeTreeIndexAggregatorFullText(const MergeTreeIndexFullText & index_) + : index(index_), granule(std::make_shared(index)) {} MergeTreeIndexGranulePtr MergeTreeIndexAggregatorFullText::getGranuleAndReset() { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h index f6230134596..e276d811cd9 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexFullText.h @@ -16,7 +16,7 @@ class MergeTreeIndexFullText; struct MergeTreeIndexGranuleFullText : public IMergeTreeIndexGranule { explicit MergeTreeIndexGranuleFullText( - const MergeTreeIndexFullText & index); + const MergeTreeIndexFullText & index_); ~MergeTreeIndexGranuleFullText() override = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp index 4eee7309811..7d681825b0c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.cpp @@ -11,25 +11,25 @@ namespace DB { -MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t index_columns) - : bits_per_row(bits_per_row), 
hash_functions(hash_functions) +MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t index_columns_) + : bits_per_row(bits_per_row_), hash_functions(hash_functions_) { total_rows = 0; - bloom_filters.resize(index_columns); + bloom_filters.resize(index_columns_); } MergeTreeIndexGranuleBloomFilter::MergeTreeIndexGranuleBloomFilter( - size_t bits_per_row, size_t hash_functions, size_t total_rows, const Blocks & granule_index_blocks) - : total_rows(total_rows), bits_per_row(bits_per_row), hash_functions(hash_functions) + size_t bits_per_row_, size_t hash_functions_, size_t total_rows_, const Blocks & granule_index_blocks_) + : total_rows(total_rows_), bits_per_row(bits_per_row_), hash_functions(hash_functions_) { - if (granule_index_blocks.empty() || !total_rows) + if (granule_index_blocks_.empty() || !total_rows) throw Exception("LOGICAL ERROR: granule_index_blocks empty or total_rows is zero.", ErrorCodes::LOGICAL_ERROR); - assertGranuleBlocksStructure(granule_index_blocks); + assertGranuleBlocksStructure(granule_index_blocks_); - for (size_t index = 0; index < granule_index_blocks.size(); ++index) + for (size_t index = 0; index < granule_index_blocks_.size(); ++index) { - Block granule_index_block = granule_index_blocks[index]; + Block granule_index_block = granule_index_blocks_[index]; if (unlikely(!granule_index_block || !granule_index_block.rows())) throw Exception("LOGICAL ERROR: granule_index_block is empty.", ErrorCodes::LOGICAL_ERROR); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h index 79670678e79..673c5ac4706 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexGranuleBloomFilter.h @@ -9,9 +9,9 @@ namespace DB class MergeTreeIndexGranuleBloomFilter : public IMergeTreeIndexGranule { public: - 
MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t index_columns); + MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t index_columns_); - MergeTreeIndexGranuleBloomFilter(size_t bits_per_row, size_t hash_functions, size_t total_rows, const Blocks & granule_index_blocks); + MergeTreeIndexGranuleBloomFilter(size_t bits_per_row_, size_t hash_functions_, size_t total_rows_, const Blocks & granule_index_blocks_); bool empty() const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp index 0d9c4722a25..360e69eacc6 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.cpp @@ -16,12 +16,12 @@ namespace ErrorCodes } -MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index) - : IMergeTreeIndexGranule(), index(index), parallelogram() {} +MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_) + : IMergeTreeIndexGranule(), index(index_), parallelogram() {} MergeTreeIndexGranuleMinMax::MergeTreeIndexGranuleMinMax( - const MergeTreeIndexMinMax & index, std::vector && parallelogram) - : IMergeTreeIndexGranule(), index(index), parallelogram(std::move(parallelogram)) {} + const MergeTreeIndexMinMax & index_, std::vector && parallelogram_) + : IMergeTreeIndexGranule(), index(index_), parallelogram(std::move(parallelogram_)) {} void MergeTreeIndexGranuleMinMax::serializeBinary(WriteBuffer & ostr) const { @@ -83,8 +83,8 @@ void MergeTreeIndexGranuleMinMax::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorMinMax::MergeTreeIndexAggregatorMinMax(const MergeTreeIndexMinMax & index) - : index(index) {} +MergeTreeIndexAggregatorMinMax::MergeTreeIndexAggregatorMinMax(const MergeTreeIndexMinMax & index_) + : index(index_) {} MergeTreeIndexGranulePtr 
MergeTreeIndexAggregatorMinMax::getGranuleAndReset() { @@ -125,8 +125,8 @@ void MergeTreeIndexAggregatorMinMax::update(const Block & block, size_t * pos, s MergeTreeIndexConditionMinMax::MergeTreeIndexConditionMinMax( const SelectQueryInfo &query, const Context &context, - const MergeTreeIndexMinMax &index) - : IMergeTreeIndexCondition(), index(index), condition(query, context, index.columns, index.expr) {} + const MergeTreeIndexMinMax &index_) + : IMergeTreeIndexCondition(), index(index_), condition(query, context, index.columns, index.expr) {} bool MergeTreeIndexConditionMinMax::alwaysUnknownOrTrue() const { @@ -169,8 +169,8 @@ bool MergeTreeIndexMinMax::mayBenefitFromIndexForIn(const ASTPtr & node) const { const String column_name = node->getColumnName(); - for (const auto & name : columns) - if (column_name == name) + for (const auto & cname : columns) + if (column_name == cname) return true; if (const auto * func = typeid_cast(node.get())) diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h index 5b514cdc738..873ea6ec98b 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexMinMax.h @@ -15,8 +15,8 @@ class MergeTreeIndexMinMax; struct MergeTreeIndexGranuleMinMax : public IMergeTreeIndexGranule { - explicit MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index); - MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index, std::vector && parallelogram); + explicit MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_); + MergeTreeIndexGranuleMinMax(const MergeTreeIndexMinMax & index_, std::vector && parallelogram_); ~MergeTreeIndexGranuleMinMax() override = default; void serializeBinary(WriteBuffer & ostr) const override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp index cd2725719a7..05f09041fed 100644 --- 
a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.cpp @@ -5,12 +5,12 @@ namespace DB { MergeTreeIndexReader::MergeTreeIndexReader( - MergeTreeIndexPtr index, MergeTreeData::DataPartPtr part, size_t marks_count, const MarkRanges & all_mark_ranges) - : index(index), stream( - part->getFullPath() + index->getFileName(), ".idx", marks_count, - all_mark_ranges, nullptr, false, nullptr, - part->getFileSizeOrZero(index->getFileName() + ".idx"), 0, DBMS_DEFAULT_BUFFER_SIZE, - &part->index_granularity_info, + MergeTreeIndexPtr index_, MergeTreeData::DataPartPtr part_, size_t marks_count_, const MarkRanges & all_mark_ranges_) + : index(index_), stream( + part_->getFullPath() + index->getFileName(), ".idx", marks_count_, + all_mark_ranges_, nullptr, false, nullptr, + part_->getFileSizeOrZero(index->getFileName() + ".idx"), 0, DBMS_DEFAULT_BUFFER_SIZE, + &part_->index_granularity_info, ReadBufferFromFileBase::ProfileCallback{}, CLOCK_MONOTONIC_COARSE) { stream.seekToStart(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h index 38dbd69f6e6..9b5b1c7fcb2 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexReader.h @@ -11,10 +11,10 @@ class MergeTreeIndexReader { public: MergeTreeIndexReader( - MergeTreeIndexPtr index, - MergeTreeData::DataPartPtr part, - size_t marks_count, - const MarkRanges & all_mark_ranges); + MergeTreeIndexPtr index_, + MergeTreeData::DataPartPtr part_, + size_t marks_count_, + const MarkRanges & all_mark_ranges_); void seek(size_t mark); diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 6d3f4dc9be7..40aba822353 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -21,16 +21,16 @@ namespace ErrorCodes const Field UNKNOWN_FIELD(3u); 
-MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index) +MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_) : IMergeTreeIndexGranule() - , index(index) + , index(index_) , block(index.header.cloneEmpty()) {} MergeTreeIndexGranuleSet::MergeTreeIndexGranuleSet( - const MergeTreeIndexSet & index, MutableColumns && mutable_columns) + const MergeTreeIndexSet & index_, MutableColumns && mutable_columns_) : IMergeTreeIndexGranule() - , index(index) - , block(index.header.cloneWithColumns(std::move(mutable_columns))) {} + , index(index_) + , block(index.header.cloneWithColumns(std::move(mutable_columns_))) {} void MergeTreeIndexGranuleSet::serializeBinary(WriteBuffer & ostr) const { @@ -94,8 +94,8 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr) } -MergeTreeIndexAggregatorSet::MergeTreeIndexAggregatorSet(const MergeTreeIndexSet & index) - : index(index), columns(index.header.cloneEmptyColumns()) +MergeTreeIndexAggregatorSet::MergeTreeIndexAggregatorSet(const MergeTreeIndexSet & index_) + : index(index_), columns(index.header.cloneEmptyColumns()) { ColumnRawPtrs column_ptrs; column_ptrs.reserve(index.columns.size()); @@ -215,8 +215,8 @@ MergeTreeIndexGranulePtr MergeTreeIndexAggregatorSet::getGranuleAndReset() MergeTreeIndexConditionSet::MergeTreeIndexConditionSet( const SelectQueryInfo & query, const Context & context, - const MergeTreeIndexSet &index) - : IMergeTreeIndexCondition(), index(index) + const MergeTreeIndexSet &index_) + : IMergeTreeIndexCondition(), index(index_) { for (size_t i = 0, size = index.columns.size(); i < size; ++i) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h index 04f4d2bec1e..b6c8c6cfa06 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndexSet.h @@ -16,8 +16,8 @@ class MergeTreeIndexSet; struct MergeTreeIndexGranuleSet : public 
IMergeTreeIndexGranule { - explicit MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index); - MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index, MutableColumns && columns); + explicit MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_); + MergeTreeIndexGranuleSet(const MergeTreeIndexSet & index_, MutableColumns && columns_); void serializeBinary(WriteBuffer & ostr) const override; void deserializeBinary(ReadBuffer & istr) override; diff --git a/dbms/src/Storages/MergeTree/MergeTreeIndices.h b/dbms/src/Storages/MergeTree/MergeTreeIndices.h index 2a00c902810..c430d1e8135 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeIndices.h +++ b/dbms/src/Storages/MergeTree/MergeTreeIndices.h @@ -77,18 +77,18 @@ class IMergeTreeIndex { public: IMergeTreeIndex( - String name, - ExpressionActionsPtr expr, - const Names & columns, - const DataTypes & data_types, - const Block & header, - size_t granularity) - : name(name) - , expr(expr) - , columns(columns) - , data_types(data_types) - , header(header) - , granularity(granularity) {} + String name_, + ExpressionActionsPtr expr_, + const Names & columns_, + const DataTypes & data_types_, + const Block & header_, + size_t granularity_) + : name(name_) + , expr(expr_) + , columns(columns_) + , data_types(data_types_) + , header(header_) + , granularity(granularity_) {} virtual ~IMergeTreeIndex() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp index bc1468f2fb7..2aae847217e 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -13,9 +13,9 @@ namespace DB { MergeTreeRangeReader::DelayedStream::DelayedStream( - size_t from_mark, MergeTreeReader * merge_tree_reader) + size_t from_mark, MergeTreeReader * merge_tree_reader_) : current_mark(from_mark), current_offset(0), num_delayed_rows(0) - , merge_tree_reader(merge_tree_reader) + , 
merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part->index_granularity)) , continue_reading(false), is_finished(false) { @@ -108,10 +108,10 @@ size_t MergeTreeRangeReader::DelayedStream::finalize(Block & block) MergeTreeRangeReader::Stream::Stream( - size_t from_mark, size_t to_mark, MergeTreeReader * merge_tree_reader) + size_t from_mark, size_t to_mark, MergeTreeReader * merge_tree_reader_) : current_mark(from_mark), offset_after_current_mark(0) , last_mark(to_mark) - , merge_tree_reader(merge_tree_reader) + , merge_tree_reader(merge_tree_reader_) , index_granularity(&(merge_tree_reader->data_part->index_granularity)) , current_mark_index_granularity(index_granularity->getMarkRows(from_mark)) , stream(from_mark, merge_tree_reader) @@ -406,15 +406,15 @@ void MergeTreeRangeReader::ReadResult::setFilter(const ColumnPtr & new_filter) MergeTreeRangeReader::MergeTreeRangeReader( - MergeTreeReader * merge_tree_reader, MergeTreeRangeReader * prev_reader, - ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, - const String * prewhere_column_name, const Names * ordered_names, - bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain) - : merge_tree_reader(merge_tree_reader), index_granularity(&(merge_tree_reader->data_part->index_granularity)) - , prev_reader(prev_reader), prewhere_column_name(prewhere_column_name) - , ordered_names(ordered_names), alias_actions(alias_actions), prewhere_actions(std::move(prewhere_actions)) - , always_reorder(always_reorder), remove_prewhere_column(remove_prewhere_column) - , last_reader_in_chain(last_reader_in_chain), is_initialized(true) + MergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, + ExpressionActionsPtr alias_actions_, ExpressionActionsPtr prewhere_actions_, + const String * prewhere_column_name_, const Names * ordered_names_, + bool always_reorder_, bool remove_prewhere_column_, bool last_reader_in_chain_) + : 
merge_tree_reader(merge_tree_reader_), index_granularity(&(merge_tree_reader->data_part->index_granularity)) + , prev_reader(prev_reader_), prewhere_column_name(prewhere_column_name_) + , ordered_names(ordered_names_), alias_actions(alias_actions_), prewhere_actions(std::move(prewhere_actions_)) + , always_reorder(always_reorder_), remove_prewhere_column(remove_prewhere_column_) + , last_reader_in_chain(last_reader_in_chain_), is_initialized(true) { } diff --git a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h index c80fff31419..9552373901c 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -20,10 +20,10 @@ class MergeTreeIndexGranularity; class MergeTreeRangeReader { public: - MergeTreeRangeReader(MergeTreeReader * merge_tree_reader, MergeTreeRangeReader * prev_reader, - ExpressionActionsPtr alias_actions, ExpressionActionsPtr prewhere_actions, - const String * prewhere_column_name, const Names * ordered_names, - bool always_reorder, bool remove_prewhere_column, bool last_reader_in_chain); + MergeTreeRangeReader(MergeTreeReader * merge_tree_reader_, MergeTreeRangeReader * prev_reader_, + ExpressionActionsPtr alias_actions_, ExpressionActionsPtr prewhere_actions_, + const String * prewhere_column_name_, const Names * ordered_names_, + bool always_reorder_, bool remove_prewhere_column_, bool last_reader_in_chain_); MergeTreeRangeReader() = default; diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp index d98fb8ac87e..6298c098220 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -19,14 +19,14 @@ namespace DB MergeTreeReadPool::MergeTreeReadPool( - const size_t threads, const size_t sum_marks, const size_t min_marks_for_concurrent_read, - RangesInDataParts parts, const MergeTreeData & data, const 
PrewhereInfoPtr & prewhere_info, - const bool check_columns, const Names & column_names, - const BackoffSettings & backoff_settings, size_t preferred_block_size_bytes, - const bool do_not_steal_tasks) - : backoff_settings{backoff_settings}, backoff_state{threads}, data{data}, - column_names{column_names}, do_not_steal_tasks{do_not_steal_tasks}, - predict_block_size_bytes{preferred_block_size_bytes > 0}, prewhere_info{prewhere_info}, parts_ranges{parts} + const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_, + RangesInDataParts parts_, const MergeTreeData & data_, const PrewhereInfoPtr & prewhere_info_, + const bool check_columns_, const Names & column_names_, + const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_, + const bool do_not_steal_tasks_) + : backoff_settings{backoff_settings_}, backoff_state{threads_}, data{data_}, + column_names{column_names_}, do_not_steal_tasks{do_not_steal_tasks_}, + predict_block_size_bytes{preferred_block_size_bytes_ > 0}, prewhere_info{prewhere_info_}, parts_ranges{parts_} { /// reverse from right-to-left to left-to-right /// because 'reverse' was done in MergeTreeDataSelectExecutor @@ -34,8 +34,8 @@ MergeTreeReadPool::MergeTreeReadPool( std::reverse(std::begin(part_ranges.ranges), std::end(part_ranges.ranges)); /// parts don't contain duplicate MergeTreeDataPart's. 
- const auto per_part_sum_marks = fillPerPartInfo(parts, check_columns); - fillPerThreadInfo(threads, sum_marks, per_part_sum_marks, parts, min_marks_for_concurrent_read); + const auto per_part_sum_marks = fillPerPartInfo(parts_, check_columns_); + fillPerThreadInfo(threads_, sum_marks_, per_part_sum_marks, parts_, min_marks_for_concurrent_read_); } diff --git a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h b/dbms/src/Storages/MergeTree/MergeTreeReadPool.h index e7c8cb7c7da..2e9cb76f0cd 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/dbms/src/Storages/MergeTree/MergeTreeReadPool.h @@ -66,11 +66,11 @@ private: public: MergeTreeReadPool( - const size_t threads, const size_t sum_marks, const size_t min_marks_for_concurrent_read, - RangesInDataParts parts, const MergeTreeData & data, const PrewhereInfoPtr & prewhere_info, - const bool check_columns, const Names & column_names, - const BackoffSettings & backoff_settings, size_t preferred_block_size_bytes, - const bool do_not_steal_tasks = false); + const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_, + RangesInDataParts parts_, const MergeTreeData & data_, const PrewhereInfoPtr & prewhere_info_, + const bool check_columns_, const Names & column_names_, + const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_, + const bool do_not_steal_tasks_ = false); MergeTreeReadTaskPtr getTask(const size_t min_marks_to_read, const size_t thread, const Names & ordered_names); diff --git a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp index 9f794f1a884..d9732c8ac6f 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReader.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeReader.cpp @@ -31,21 +31,21 @@ namespace ErrorCodes MergeTreeReader::~MergeTreeReader() = default; -MergeTreeReader::MergeTreeReader(const String & path, - const MergeTreeData::DataPartPtr & data_part, const NamesAndTypesList & columns, - 
UncompressedCache * uncompressed_cache, MarkCache * mark_cache, bool save_marks_in_cache, - const MergeTreeData & storage, const MarkRanges & all_mark_ranges, - size_t aio_threshold, size_t max_read_buffer_size, const ValueSizeMap & avg_value_size_hints, - const ReadBufferFromFileBase::ProfileCallback & profile_callback, - clockid_t clock_type) - : data_part(data_part), avg_value_size_hints(avg_value_size_hints), path(path), columns(columns) - , uncompressed_cache(uncompressed_cache), mark_cache(mark_cache), save_marks_in_cache(save_marks_in_cache), storage(storage) - , all_mark_ranges(all_mark_ranges), aio_threshold(aio_threshold), max_read_buffer_size(max_read_buffer_size) +MergeTreeReader::MergeTreeReader(const String & path_, + const MergeTreeData::DataPartPtr & data_part_, const NamesAndTypesList & columns_, + UncompressedCache * uncompressed_cache_, MarkCache * mark_cache_, bool save_marks_in_cache_, + const MergeTreeData & storage_, const MarkRanges & all_mark_ranges_, + size_t aio_threshold_, size_t max_read_buffer_size_, const ValueSizeMap & avg_value_size_hints_, + const ReadBufferFromFileBase::ProfileCallback & profile_callback_, + clockid_t clock_type_) + : data_part(data_part_), avg_value_size_hints(avg_value_size_hints_), path(path_), columns(columns_) + , uncompressed_cache(uncompressed_cache_), mark_cache(mark_cache_), save_marks_in_cache(save_marks_in_cache_), storage(storage_) + , all_mark_ranges(all_mark_ranges_), aio_threshold(aio_threshold_), max_read_buffer_size(max_read_buffer_size_) { try { for (const NameAndTypePair & column : columns) - addStreams(column.name, *column.type, profile_callback, clock_type); + addStreams(column.name, *column.type, profile_callback_, clock_type_); } catch (...) 
{ diff --git a/dbms/src/Storages/MergeTree/MergeTreeReader.h b/dbms/src/Storages/MergeTree/MergeTreeReader.h index 6d23d413cf4..25f4c9ddd32 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeReader.h +++ b/dbms/src/Storages/MergeTree/MergeTreeReader.h @@ -19,16 +19,16 @@ public: using ValueSizeMap = std::map; using DeserializeBinaryBulkStateMap = std::map; - MergeTreeReader(const String & path, /// Path to the directory containing the part - const MergeTreeData::DataPartPtr & data_part, const NamesAndTypesList & columns, - UncompressedCache * uncompressed_cache, - MarkCache * mark_cache, - bool save_marks_in_cache, - const MergeTreeData & storage, const MarkRanges & all_mark_ranges, - size_t aio_threshold, size_t max_read_buffer_size, - const ValueSizeMap & avg_value_size_hints = ValueSizeMap{}, - const ReadBufferFromFileBase::ProfileCallback & profile_callback = ReadBufferFromFileBase::ProfileCallback{}, - clockid_t clock_type = CLOCK_MONOTONIC_COARSE); + MergeTreeReader(const String & path_, /// Path to the directory containing the part + const MergeTreeData::DataPartPtr & data_part_, const NamesAndTypesList & columns_, + UncompressedCache * uncompressed_cache_, + MarkCache * mark_cache_, + bool save_marks_in_cache_, + const MergeTreeData & storage_, const MarkRanges & all_mark_ranges_, + size_t aio_threshold_, size_t max_read_buffer_size_, + const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap{}, + const ReadBufferFromFileBase::ProfileCallback & profile_callback_ = ReadBufferFromFileBase::ProfileCallback{}, + clockid_t clock_type_ = CLOCK_MONOTONIC_COARSE); ~MergeTreeReader(); diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp index 2d48b362902..7a6e6f197dd 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.cpp @@ -23,7 +23,7 @@ 
MergeTreeSelectBlockInputStream::MergeTreeSelectBlockInputStream( const MarkRanges & mark_ranges_, bool use_uncompressed_cache_, const PrewhereInfoPtr & prewhere_info_, - bool check_columns, + bool check_columns_, size_t min_bytes_to_use_direct_io_, size_t max_read_buffer_size_, bool save_marks_in_cache_, @@ -39,7 +39,7 @@ MergeTreeSelectBlockInputStream::MergeTreeSelectBlockInputStream( part_columns_lock(data_part->columns_lock), all_mark_ranges(mark_ranges_), part_index_in_query(part_index_in_query_), - check_columns(check_columns), + check_columns(check_columns_), path(data_part->getFullPath()) { /// Let's estimate total number of rows for progress bar. diff --git a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h index 4faeaa0d397..0fc9830f5d0 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeSelectBlockInputStream.h @@ -22,7 +22,7 @@ public: UInt64 max_block_size_rows, size_t preferred_block_size_bytes, size_t preferred_max_column_in_block_size_bytes, - Names column_names, + Names column_names_, const MarkRanges & mark_ranges, bool use_uncompressed_cache, const PrewhereInfoPtr & prewhere_info, diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp index 1642b9da602..9c34782dec8 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp @@ -8,8 +8,8 @@ namespace DB MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream( - const size_t thread, - const MergeTreeReadPoolPtr & pool, + const size_t thread_, + const MergeTreeReadPoolPtr & pool_, const size_t min_marks_to_read_, const UInt64 max_block_size_rows_, size_t preferred_block_size_bytes_, @@ -23,8 +23,8 @@ 
MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream( MergeTreeBaseSelectBlockInputStream{storage_, prewhere_info_, max_block_size_rows_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, settings.min_bytes_to_use_direct_io, settings.max_read_buffer_size, use_uncompressed_cache_, true, virt_column_names_}, - thread{thread}, - pool{pool} + thread{thread_}, + pool{pool_} { /// round min_marks_to_read up to nearest multiple of block_size expressed in marks /// If granularity is adaptive it doesn't make sense @@ -70,7 +70,7 @@ bool MergeTreeThreadSelectBlockInputStream::getNewTask() const std::string path = task->data_part->getFullPath(); /// Allows pool to reduce number of threads in case of too slow reads. - auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info) { pool->profileFeedback(info); }; + auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info_) { pool->profileFeedback(info_); }; if (!reader) { diff --git a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h index 592b6b71f73..3c7dfb7927d 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h +++ b/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h @@ -15,17 +15,17 @@ class MergeTreeThreadSelectBlockInputStream : public MergeTreeBaseSelectBlockInp { public: MergeTreeThreadSelectBlockInputStream( - const size_t thread, - const std::shared_ptr & pool, - const size_t min_marks_to_read, - const UInt64 max_block_size, - size_t preferred_block_size_bytes, - size_t preferred_max_column_in_block_size_bytes, - const MergeTreeData & storage, - const bool use_uncompressed_cache, - const PrewhereInfoPtr & prewhere_info, - const Settings & settings, - const Names & virt_column_names); + const size_t thread_, + const std::shared_ptr & pool_, + const size_t min_marks_to_read_, + const UInt64 max_block_size_, + size_t 
preferred_block_size_bytes_, + size_t preferred_max_column_in_block_size_bytes_, + const MergeTreeData & storage_, + const bool use_uncompressed_cache_, + const PrewhereInfoPtr & prewhere_info_, + const Settings & settings_, + const Names & virt_column_names_); String getName() const override { return "MergeTreeThread"; } diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 4311fd027ef..a772e0a204b 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -32,13 +32,13 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer( SelectQueryInfo & query_info, const Context & context, const MergeTreeData & data, - const Names & queried_columns, - Logger * log) + const Names & queried_columns_, + Logger * log_) : table_columns{ext::map(data.getColumns().getAllPhysical(), [] (const NameAndTypePair & col) { return col.name; })}, - queried_columns{queried_columns}, + queried_columns{queried_columns_}, block_with_constants{KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context)}, - log{log} + log{log_} { if (!data.primary_key_columns.empty()) first_primary_key_column = data.primary_key_columns[0]; @@ -62,7 +62,7 @@ void MergeTreeWhereOptimizer::calculateColumnSizes(const MergeTreeData & data, c static void collectIdentifiersNoSubqueries(const ASTPtr & ast, NameSet & set) { - if (auto opt_name = getIdentifierName(ast)) + if (auto opt_name = tryGetIdentifierName(ast)) return (void)set.insert(*opt_name); if (ast->as()) diff --git a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h index ccc1195cada..bdfcaad6a51 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h +++ b/dbms/src/Storages/MergeTree/MergeTreeWhereOptimizer.h @@ -31,8 +31,8 @@ public: SelectQueryInfo & query_info, const Context & context, const 
MergeTreeData & data, - const Names & queried_column_names, - Poco::Logger * log); + const Names & queried_column_names_, + Poco::Logger * log_); private: void optimize(ASTSelectQuery & select) const; diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 0c0b1785add..e79ec7dd046 100644 --- a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -6,18 +6,18 @@ namespace DB MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( MergeTreeData & storage_, const Block & header_, const String & part_path_, bool sync_, CompressionCodecPtr default_codec_, bool skip_offsets_, - const std::vector & indices_to_recalc, - WrittenOffsetColumns & already_written_offset_columns, + const std::vector & indices_to_recalc_, + WrittenOffsetColumns & already_written_offset_columns_, const MergeTreeIndexGranularity & index_granularity_) : IMergedBlockOutputStream( storage_, part_path_, storage_.global_context.getSettings().min_compress_block_size, storage_.global_context.getSettings().max_compress_block_size, default_codec_, storage_.global_context.getSettings().min_bytes_to_use_direct_io, - false, indices_to_recalc, index_granularity_) - , header(header_) - , sync(sync_) - , skip_offsets(skip_offsets_) - , already_written_offset_columns(already_written_offset_columns) + false, + indices_to_recalc_, + index_granularity_), + header(header_), sync(sync_), skip_offsets(skip_offsets_), + already_written_offset_columns(already_written_offset_columns_) { } diff --git a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h index adf5d7f7bfd..b8d637f37fb 100644 --- a/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h +++ b/dbms/src/Storages/MergeTree/MergedColumnOnlyOutputStream.h @@ -15,8 +15,8 @@ public: MergedColumnOnlyOutputStream( MergeTreeData & 
storage_, const Block & header_, const String & part_path_, bool sync_, CompressionCodecPtr default_codec_, bool skip_offsets_, - const std::vector & indices_to_recalc, - WrittenOffsetColumns & already_written_offset_columns, + const std::vector & indices_to_recalc_, + WrittenOffsetColumns & already_written_offset_columns_, const MergeTreeIndexGranularity & index_granularity_); Block getHeader() const override { return header; } diff --git a/dbms/src/Storages/MergeTree/RangesInDataPart.h b/dbms/src/Storages/MergeTree/RangesInDataPart.h index a93a2103841..4f5d34e118d 100644 --- a/dbms/src/Storages/MergeTree/RangesInDataPart.h +++ b/dbms/src/Storages/MergeTree/RangesInDataPart.h @@ -16,9 +16,9 @@ struct RangesInDataPart RangesInDataPart() = default; - RangesInDataPart(const MergeTreeData::DataPartPtr & data_part, const size_t part_index_in_query, - const MarkRanges & ranges = MarkRanges{}) - : data_part{data_part}, part_index_in_query{part_index_in_query}, ranges{ranges} + RangesInDataPart(const MergeTreeData::DataPartPtr & data_part_, const size_t part_index_in_query_, + const MarkRanges & ranges_ = MarkRanges{}) + : data_part{data_part_}, part_index_in_query{part_index_in_query_}, ranges{ranges_} { } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index 335a88313b6..3bb7e04fe6e 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -33,8 +33,8 @@ namespace ErrorCodes ReplicatedMergeTreeBlockOutputStream::ReplicatedMergeTreeBlockOutputStream( - StorageReplicatedMergeTree & storage_, size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block, bool deduplicate_) - : storage(storage_), quorum(quorum_), quorum_timeout_ms(quorum_timeout_ms_), max_parts_per_block(max_parts_per_block), deduplicate(deduplicate_), + StorageReplicatedMergeTree & storage_, 
size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block_, bool deduplicate_) + : storage(storage_), quorum(quorum_), quorum_timeout_ms(quorum_timeout_ms_), max_parts_per_block(max_parts_per_block_), deduplicate(deduplicate_), log(&Logger::get(storage.getLogName() + " (Replicated OutputStream)")) { /// The quorum value `1` has the same meaning as if it is disabled. diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h index e20a36a9440..0f6fc1e7cee 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.h @@ -23,7 +23,7 @@ class ReplicatedMergeTreeBlockOutputStream : public IBlockOutputStream { public: ReplicatedMergeTreeBlockOutputStream(StorageReplicatedMergeTree & storage_, - size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block, + size_t quorum_, size_t quorum_timeout_ms_, size_t max_parts_per_block_, bool deduplicate_); Block getHeader() const override; diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h index 322ee593c46..198c9714f64 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.h @@ -41,7 +41,7 @@ public: { ReplicatedMergeTreePartCheckThread * parent; - TemporarilyStop(ReplicatedMergeTreePartCheckThread * parent) : parent(parent) + TemporarilyStop(ReplicatedMergeTreePartCheckThread * parent_) : parent(parent_) { parent->stop(); } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index a6be0c3b9a9..665e8c9bd5c 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1014,8 +1014,8 @@ Int64 
ReplicatedMergeTreeQueue::getCurrentMutationVersion(const String & partiti } -ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue) - : entry(entry_), queue(queue) +ReplicatedMergeTreeQueue::CurrentlyExecuting::CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue_) + : entry(entry_), queue(queue_) { entry->currently_executing = true; ++entry->num_tries; @@ -1452,7 +1452,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( for (const String & partition : partitions) lock_futures.push_back(zookeeper->asyncGetChildren(queue.zookeeper_path + "/block_numbers/" + partition)); - struct BlockInfo + struct BlockInfo_ { String partition; Int64 number; @@ -1460,7 +1460,7 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( std::future contents_future; }; - std::vector block_infos; + std::vector block_infos; for (size_t i = 0; i < partitions.size(); ++i) { Strings partition_block_numbers = lock_futures[i].get().names; @@ -1472,13 +1472,13 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( { Int64 block_number = parse(entry.substr(strlen("block-"))); String zk_path = queue.zookeeper_path + "/block_numbers/" + partitions[i] + "/" + entry; - block_infos.push_back( - BlockInfo{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)}); + block_infos.emplace_back( + BlockInfo_{partitions[i], block_number, zk_path, zookeeper->asyncTryGet(zk_path)}); } } } - for (BlockInfo & block : block_infos) + for (auto & block : block_infos) { Coordination::GetResponse resp = block.contents_future.get(); if (!resp.error && lock_holder_paths.count(resp.data)) diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 0d439d7b610..47d82f4a9a2 100644 --- 
a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -138,7 +138,7 @@ private: friend class SubscriberHandler; struct SubscriberHandler : public boost::noncopyable { - SubscriberHandler(SubscriberIterator it, ReplicatedMergeTreeQueue & queue) : it(it), queue(queue) {} + SubscriberHandler(SubscriberIterator it_, ReplicatedMergeTreeQueue & queue_) : it(it_), queue(queue_) {} ~SubscriberHandler(); private: @@ -215,7 +215,7 @@ private: friend class ReplicatedMergeTreeQueue; /// Created only in the selectEntryToProcess function. It is called under mutex. - CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue); + CurrentlyExecuting(const ReplicatedMergeTreeQueue::LogEntryPtr & entry_, ReplicatedMergeTreeQueue & queue_); /// In case of fetch, we determine actual part during the execution, so we need to update entry. It is called under state_mutex. static void setActualPartName(ReplicatedMergeTreeQueue::LogEntry & entry, const String & actual_part_name, diff --git a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h b/dbms/src/Storages/MergeTree/SimpleMergeSelector.h index e699f605b8f..729eaa966e9 100644 --- a/dbms/src/Storages/MergeTree/SimpleMergeSelector.h +++ b/dbms/src/Storages/MergeTree/SimpleMergeSelector.h @@ -71,7 +71,7 @@ public: double heuristic_to_remove_small_parts_at_right_max_ratio = 0.01; }; - explicit SimpleMergeSelector(const Settings & settings) : settings(settings) {} + explicit SimpleMergeSelector(const Settings & settings_) : settings(settings_) {} PartsInPartition select( const Partitions & partitions, diff --git a/dbms/src/Storages/MergeTree/checkDataPart.cpp b/dbms/src/Storages/MergeTree/checkDataPart.cpp index 2ae83b5076a..2303ec38efa 100644 --- a/dbms/src/Storages/MergeTree/checkDataPart.cpp +++ b/dbms/src/Storages/MergeTree/checkDataPart.cpp @@ -72,11 +72,11 @@ public: Stream( const String & path, - const String & base_name, + 
const String & base_name_, const String & bin_file_extension_, const String & mrk_file_extension_, const MergeTreeIndexGranularity & index_granularity_) - : base_name(base_name) + : base_name(base_name_) , bin_file_extension(bin_file_extension_) , mrk_file_extension(mrk_file_extension_) , bin_file_path(path + base_name + bin_file_extension) diff --git a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp b/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp index 138e7c14f9d..19dbe49b27e 100644 --- a/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/dbms/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -45,13 +45,13 @@ static Names extractColumnNames(const ASTPtr & node) Names res; res.reserve(elements.size()); for (const auto & elem : elements) - res.push_back(*getIdentifierName(elem)); + res.push_back(getIdentifierName(elem)); return res; } else { - return { *getIdentifierName(node) }; + return { getIdentifierName(node) }; } } @@ -502,7 +502,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) if (merging_params.mode == MergeTreeData::MergingParams::Collapsing) { - if (!getIdentifierName(engine_args.back(), merging_params.sign_column)) + if (!tryGetIdentifierNameInto(engine_args.back(), merging_params.sign_column)) throw Exception( "Sign column name must be an unquoted string" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS); @@ -514,7 +514,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) /// If the last element is not index_granularity or replica_name (a literal), then this is the name of the version column. 
if (!engine_args.empty() && !engine_args.back()->as()) { - if (!getIdentifierName(engine_args.back(), merging_params.version_column)) + if (!tryGetIdentifierNameInto(engine_args.back(), merging_params.version_column)) throw Exception( "Version column name must be an unquoted string" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS); @@ -552,14 +552,14 @@ static StoragePtr create(const StorageFactory::Arguments & args) } else if (merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing) { - if (!getIdentifierName(engine_args.back(), merging_params.version_column)) + if (!tryGetIdentifierNameInto(engine_args.back(), merging_params.version_column)) throw Exception( "Version column name must be an unquoted string" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS); engine_args.pop_back(); - if (!getIdentifierName(engine_args.back(), merging_params.sign_column)) + if (!tryGetIdentifierNameInto(engine_args.back(), merging_params.sign_column)) throw Exception( "Sign column name must be an unquoted string" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS); @@ -616,7 +616,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) /// Now only three parameters remain - date (or partitioning expression), primary_key, index_granularity. 
- if (!getIdentifierName(engine_args[0], date_column_name)) + if (!tryGetIdentifierNameInto(engine_args[0], date_column_name)) throw Exception( "Date column name must be an unquoted string" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS); diff --git a/dbms/src/Storages/PartitionCommands.cpp b/dbms/src/Storages/PartitionCommands.cpp index 054a8be4e4e..028e8170a9f 100644 --- a/dbms/src/Storages/PartitionCommands.cpp +++ b/dbms/src/Storages/PartitionCommands.cpp @@ -65,7 +65,7 @@ std::optional PartitionCommand::parse(const ASTAlterCommand * PartitionCommand res; res.type = CLEAR_COLUMN; res.partition = command_ast->partition; - res.column_name = *getIdentifierName(command_ast->column); + res.column_name = getIdentifierName(command_ast->column); return res; } else if (command_ast->type == ASTAlterCommand::DROP_INDEX && command_ast->partition) @@ -76,7 +76,7 @@ std::optional PartitionCommand::parse(const ASTAlterCommand * PartitionCommand res; res.type = CLEAR_INDEX; res.partition = command_ast->partition; - res.index_name = *getIdentifierName(command_ast->index); + res.index_name = getIdentifierName(command_ast->index); return res; } else if (command_ast->type == ASTAlterCommand::FREEZE_ALL) diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp index 6155dabd028..e862d27fdaa 100644 --- a/dbms/src/Storages/StorageDistributed.cpp +++ b/dbms/src/Storages/StorageDistributed.cpp @@ -190,8 +190,8 @@ static ExpressionActionsPtr buildShardingKeyExpression(const ASTPtr & sharding_k } StorageDistributed::StorageDistributed( - const String & database_name, - const String & table_name, + const String & database_name_, + const String & table_name_, const ColumnsDescription & columns_, const String & remote_database_, const String & remote_table_, @@ -199,8 +199,8 @@ StorageDistributed::StorageDistributed( const Context & context_, const ASTPtr & sharding_key_, const String & data_path_, - bool attach) - : 
IStorage{columns_}, table_name(table_name), database_name(database_name), + bool attach_) + : IStorage{columns_}, table_name(table_name_), database_name(database_name_), remote_database(remote_database_), remote_table(remote_table_), global_context(context_), cluster_name(global_context.getMacros()->expand(cluster_name_)), has_sharding_key(sharding_key_), sharding_key_expr(sharding_key_ ? buildShardingKeyExpression(sharding_key_, global_context, getColumns().getAllPhysical(), false) : nullptr), @@ -208,7 +208,7 @@ StorageDistributed::StorageDistributed( path(data_path_.empty() ? "" : (data_path_ + escapeForFileName(table_name) + '/')) { /// Sanity check. Skip check if the table is already created to allow the server to start. - if (!attach && !cluster_name.empty()) + if (!attach_ && !cluster_name.empty()) { size_t num_local_shards = global_context.getCluster(cluster_name)->getLocalShardCount(); if (num_local_shards && remote_database == database_name && remote_table == table_name) @@ -218,7 +218,7 @@ StorageDistributed::StorageDistributed( StorageDistributed::StorageDistributed( - const String & database_name, + const String & database_name_, const String & table_name_, const ColumnsDescription & columns_, ASTPtr remote_table_function_ptr_, @@ -227,7 +227,7 @@ StorageDistributed::StorageDistributed( const ASTPtr & sharding_key_, const String & data_path_, bool attach) - : StorageDistributed(database_name, table_name_, columns_, String{}, String{}, cluster_name_, context_, sharding_key_, data_path_, attach) + : StorageDistributed(database_name_, table_name_, columns_, String{}, String{}, cluster_name_, context_, sharding_key_, data_path_, attach) { remote_table_function_ptr = remote_table_function_ptr_; } @@ -493,7 +493,7 @@ ClusterPtr StorageDistributed::skipUnusedShards(ClusterPtr cluster, const Select { const auto & select = query_info.query->as(); - if (!select.where()) + if (!select.where() || !sharding_key_expr) return nullptr; const auto & blocks = 
evaluateExpressionOverConstantCondition(select.where(), sharding_key_expr); diff --git a/dbms/src/Storages/StorageDistributed.h b/dbms/src/Storages/StorageDistributed.h index 86fe80f575f..6885a758e9e 100644 --- a/dbms/src/Storages/StorageDistributed.h +++ b/dbms/src/Storages/StorageDistributed.h @@ -158,7 +158,7 @@ public: protected: StorageDistributed( - const String & database_name, + const String & database_name_, const String & table_name_, const ColumnsDescription & columns_, const String & remote_database_, @@ -167,7 +167,7 @@ protected: const Context & context_, const ASTPtr & sharding_key_, const String & data_path_, - bool attach); + bool attach_); StorageDistributed( const String & database_name, diff --git a/dbms/src/Storages/StorageFile.cpp b/dbms/src/Storages/StorageFile.cpp index 35bc1747dee..5162e667133 100644 --- a/dbms/src/Storages/StorageFile.cpp +++ b/dbms/src/Storages/StorageFile.cpp @@ -192,8 +192,8 @@ BlockInputStreams StorageFile::read( unsigned /*num_streams*/) { BlockInputStreamPtr block_input = std::make_shared(*this, context, max_block_size); - const ColumnsDescription & columns = getColumns(); - auto column_defaults = columns.getDefaults(); + const ColumnsDescription & columns_ = getColumns(); + auto column_defaults = columns_.getDefaults(); if (column_defaults.empty()) return {block_input}; return {std::make_shared(block_input, column_defaults, context)}; @@ -303,7 +303,7 @@ void registerStorageFile(StorageFactory & factory) { /// Will use FD if engine_args[1] is int literal or identifier with std* name - if (auto opt_name = getIdentifierName(engine_args[1])) + if (auto opt_name = tryGetIdentifierName(engine_args[1])) { if (*opt_name == "stdin") source_fd = STDIN_FILENO; diff --git a/dbms/src/Storages/StorageJoin.cpp b/dbms/src/Storages/StorageJoin.cpp index 3c90917b0f6..3de820a5f4e 100644 --- a/dbms/src/Storages/StorageJoin.cpp +++ b/dbms/src/Storages/StorageJoin.cpp @@ -90,7 +90,7 @@ void registerStorageJoin(StorageFactory & factory) 
"Storage Join requires at least 3 parameters: Join(ANY|ALL, LEFT|INNER, keys...).", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - auto opt_strictness_id = getIdentifierName(engine_args[0]); + auto opt_strictness_id = tryGetIdentifierName(engine_args[0]); if (!opt_strictness_id) throw Exception("First parameter of storage Join must be ANY or ALL (without quotes).", ErrorCodes::BAD_ARGUMENTS); @@ -103,7 +103,7 @@ void registerStorageJoin(StorageFactory & factory) else throw Exception("First parameter of storage Join must be ANY or ALL (without quotes).", ErrorCodes::BAD_ARGUMENTS); - auto opt_kind_id = getIdentifierName(engine_args[1]); + auto opt_kind_id = tryGetIdentifierName(engine_args[1]); if (!opt_kind_id) throw Exception("Second parameter of storage Join must be LEFT or INNER (without quotes).", ErrorCodes::BAD_ARGUMENTS); @@ -124,7 +124,7 @@ void registerStorageJoin(StorageFactory & factory) key_names.reserve(engine_args.size() - 2); for (size_t i = 2, size = engine_args.size(); i < size; ++i) { - auto opt_key = getIdentifierName(engine_args[i]); + auto opt_key = tryGetIdentifierName(engine_args[i]); if (!opt_key) throw Exception("Parameter №" + toString(i + 1) + " of storage Join don't look like column name.", ErrorCodes::BAD_ARGUMENTS); diff --git a/dbms/src/Storages/StorageLog.cpp b/dbms/src/Storages/StorageLog.cpp index 12e45ca96bc..d17caeb0046 100644 --- a/dbms/src/Storages/StorageLog.cpp +++ b/dbms/src/Storages/StorageLog.cpp @@ -87,8 +87,8 @@ private: struct Stream { - Stream(const std::string & data_path, size_t offset, size_t max_read_buffer_size) - : plain(data_path, std::min(static_cast(max_read_buffer_size), Poco::File(data_path).getSize())), + Stream(const std::string & data_path, size_t offset, size_t max_read_buffer_size_) + : plain(data_path, std::min(static_cast(max_read_buffer_size_), Poco::File(data_path).getSize())), compressed(plain) { if (offset) diff --git a/dbms/src/Storages/StorageMergeTree.cpp 
b/dbms/src/Storages/StorageMergeTree.cpp index d832303abb3..8f950e07499 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -204,8 +204,8 @@ std::vector StorageMergeTree::prepar const auto & columns_for_parts = new_columns.getAllPhysical(); - const Settings & settings = context.getSettingsRef(); - size_t thread_pool_size = std::min(parts.size(), settings.max_alter_threads); + const Settings & settings_ = context.getSettingsRef(); + size_t thread_pool_size = std::min(parts.size(), settings_.max_alter_threads); ThreadPool thread_pool(thread_pool_size); diff --git a/dbms/src/Storages/StorageMySQL.cpp b/dbms/src/Storages/StorageMySQL.cpp index 25a06a8bd4e..4ad00338793 100644 --- a/dbms/src/Storages/StorageMySQL.cpp +++ b/dbms/src/Storages/StorageMySQL.cpp @@ -39,61 +39,61 @@ String backQuoteMySQL(const String & x) StorageMySQL::StorageMySQL( const std::string & database_name_, const std::string & table_name_, - mysqlxx::Pool && pool, - const std::string & remote_database_name, - const std::string & remote_table_name, - const bool replace_query, - const std::string & on_duplicate_clause, + mysqlxx::Pool && pool_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const bool replace_query_, + const std::string & on_duplicate_clause_, const ColumnsDescription & columns_, - const Context & context) + const Context & context_) : IStorage{columns_} , table_name(table_name_) , database_name(database_name_) - , remote_database_name(remote_database_name) - , remote_table_name(remote_table_name) - , replace_query{replace_query} - , on_duplicate_clause{on_duplicate_clause} - , pool(std::move(pool)) - , global_context(context) + , remote_database_name(remote_database_name_) + , remote_table_name(remote_table_name_) + , replace_query{replace_query_} + , on_duplicate_clause{on_duplicate_clause_} + , pool(std::move(pool_)) + , global_context(context_) { } BlockInputStreams StorageMySQL::read( - 
const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, + const Names & column_names_, + const SelectQueryInfo & query_info_, + const Context & context_, QueryProcessingStage::Enum /*processed_stage*/, - size_t max_block_size, + size_t max_block_size_, unsigned) { - check(column_names); + check(column_names_); String query = transformQueryForExternalDatabase( - *query_info.query, getColumns().getOrdinary(), IdentifierQuotingStyle::BackticksMySQL, remote_database_name, remote_table_name, context); + *query_info_.query, getColumns().getOrdinary(), IdentifierQuotingStyle::BackticksMySQL, remote_database_name, remote_table_name, context_); Block sample_block; - for (const String & column_name : column_names) + for (const String & column_name : column_names_) { auto column_data = getColumn(column_name); sample_block.insert({ column_data.type, column_data.name }); } - return { std::make_shared(pool.Get(), query, sample_block, max_block_size) }; + return { std::make_shared(pool.Get(), query, sample_block, max_block_size_) }; } class StorageMySQLBlockOutputStream : public IBlockOutputStream { public: - explicit StorageMySQLBlockOutputStream(const StorageMySQL & storage, - const std::string & remote_database_name, - const std::string & remote_table_name , - const mysqlxx::PoolWithFailover::Entry & entry, + explicit StorageMySQLBlockOutputStream(const StorageMySQL & storage_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const mysqlxx::PoolWithFailover::Entry & entry_, const size_t & mysql_max_rows_to_insert) - : storage{storage} - , remote_database_name{remote_database_name} - , remote_table_name{remote_table_name} - , entry{entry} + : storage{storage_} + , remote_database_name{remote_database_name_} + , remote_table_name{remote_table_name_} + , entry{entry_} , max_batch_rows{mysql_max_rows_to_insert} { } diff --git a/dbms/src/Storages/StorageMySQL.h b/dbms/src/Storages/StorageMySQL.h index 
f28bb2aaa55..3b891566ed0 100644 --- a/dbms/src/Storages/StorageMySQL.h +++ b/dbms/src/Storages/StorageMySQL.h @@ -22,13 +22,13 @@ public: StorageMySQL( const std::string & database_name_, const std::string & table_name_, - mysqlxx::Pool && pool, - const std::string & remote_database_name, - const std::string & remote_table_name, - const bool replace_query, - const std::string & on_duplicate_clause, - const ColumnsDescription & columns, - const Context & context); + mysqlxx::Pool && pool_, + const std::string & remote_database_name_, + const std::string & remote_table_name_, + const bool replace_query_, + const std::string & on_duplicate_clause_, + const ColumnsDescription & columns_, + const Context & context_); std::string getName() const override { return "MySQL"; } std::string getTableName() const override { return table_name; } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index c7338776fbf..e03fe8241fa 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -2906,14 +2906,14 @@ BlockInputStreams StorageReplicatedMergeTree::read( const size_t max_block_size, const unsigned num_streams) { - const Settings & settings = context.getSettingsRef(); + const Settings & settings_ = context.getSettingsRef(); /** The `select_sequential_consistency` setting has two meanings: * 1. To throw an exception if on a replica there are not all parts which have been written down on quorum of remaining replicas. * 2. Do not read parts that have not yet been written to the quorum of the replicas. * For this you have to synchronously go to ZooKeeper. 
*/ - if (settings.select_sequential_consistency) + if (settings_.select_sequential_consistency) { ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock max_added_blocks; diff --git a/dbms/src/Storages/StorageStripeLog.cpp b/dbms/src/Storages/StorageStripeLog.cpp index 3052962606d..42745e11971 100644 --- a/dbms/src/Storages/StorageStripeLog.cpp +++ b/dbms/src/Storages/StorageStripeLog.cpp @@ -214,7 +214,8 @@ StorageStripeLog::StorageStripeLog( { /// create files if they do not exist if (0 != mkdir(full_path.c_str(), S_IRWXU | S_IRWXG | S_IRWXO) && errno != EEXIST) - throwFromErrno("Cannot create directory " + full_path, ErrorCodes::CANNOT_CREATE_DIRECTORY); + throwFromErrnoWithPath("Cannot create directory " + full_path, full_path, + ErrorCodes::CANNOT_CREATE_DIRECTORY); } } diff --git a/dbms/src/Storages/StorageTinyLog.cpp b/dbms/src/Storages/StorageTinyLog.cpp index 214964c32b4..778b07503a8 100644 --- a/dbms/src/Storages/StorageTinyLog.cpp +++ b/dbms/src/Storages/StorageTinyLog.cpp @@ -86,8 +86,8 @@ private: struct Stream { - Stream(const std::string & data_path, size_t max_read_buffer_size) - : plain(data_path, std::min(static_cast(max_read_buffer_size), Poco::File(data_path).getSize())), + Stream(const std::string & data_path, size_t max_read_buffer_size_) + : plain(data_path, std::min(static_cast(max_read_buffer_size_), Poco::File(data_path).getSize())), compressed(plain) { } @@ -343,7 +343,8 @@ StorageTinyLog::StorageTinyLog( { /// create files if they do not exist if (0 != mkdir(full_path.c_str(), S_IRWXU | S_IRWXG | S_IRWXO) && errno != EEXIST) - throwFromErrno("Cannot create directory " + full_path, ErrorCodes::CANNOT_CREATE_DIRECTORY); + throwFromErrnoWithPath("Cannot create directory " + full_path, full_path, + ErrorCodes::CANNOT_CREATE_DIRECTORY); } for (const auto & col : getColumns().getAllPhysical()) diff --git a/dbms/src/Storages/StorageValues.cpp b/dbms/src/Storages/StorageValues.cpp index d289a4d6579..452d815e5ea 100644 --- 
a/dbms/src/Storages/StorageValues.cpp +++ b/dbms/src/Storages/StorageValues.cpp @@ -7,10 +7,10 @@ namespace DB { -StorageValues::StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns, const Block & res_block_) +StorageValues::StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_) : database_name(database_name_), table_name(table_name_), res_block(res_block_) { - setColumns(columns); + setColumns(columns_); } BlockInputStreams StorageValues::read( @@ -21,7 +21,7 @@ BlockInputStreams StorageValues::read( size_t /*max_block_size*/, unsigned /*num_streams*/) { - check(column_names); + check(column_names, true); return BlockInputStreams(1, std::make_shared(res_block)); } diff --git a/dbms/src/Storages/StorageValues.h b/dbms/src/Storages/StorageValues.h index f5c6881ae36..36c3bc15301 100644 --- a/dbms/src/Storages/StorageValues.h +++ b/dbms/src/Storages/StorageValues.h @@ -30,7 +30,7 @@ private: Block res_block; protected: - StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns, const Block & res_block_); + StorageValues(const std::string & database_name_, const std::string & table_name_, const ColumnsDescription & columns_, const Block & res_block_); }; } diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/src/Storages/System/StorageSystemColumns.cpp index 3ba1128c245..30b673ddbbb 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemColumns.cpp @@ -54,15 +54,15 @@ class ColumnsBlockInputStream : public IBlockInputStream { public: ColumnsBlockInputStream( - const std::vector & columns_mask, - const Block & header, - UInt64 max_block_size, - ColumnPtr databases, - ColumnPtr tables, - Storages storages, + const std::vector & columns_mask_, + const Block & header_, + UInt64 
max_block_size_, + ColumnPtr databases_, + ColumnPtr tables_, + Storages storages_, String query_id_) - : columns_mask(columns_mask), header(header), max_block_size(max_block_size) - , databases(databases), tables(tables), storages(std::move(storages)) + : columns_mask(columns_mask_), header(header_), max_block_size(max_block_size_) + , databases(databases_), tables(tables_), storages(std::move(storages_)) , query_id(std::move(query_id_)), total_tables(tables->size()) { } @@ -126,6 +126,9 @@ protected: for (const auto & column : columns) { + if (column.is_virtual) + continue; + size_t src_index = 0; size_t res_index = 0; diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp index 28fb2076c21..0957e8f8349 100644 --- a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp +++ b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp @@ -20,13 +20,13 @@ void StorageSystemDataTypeFamilies::fillData(MutableColumns & res_columns, const { const auto & factory = DataTypeFactory::instance(); auto names = factory.getAllRegisteredNames(); - for (const auto & name : names) + for (const auto & dtf_name : names) { - res_columns[0]->insert(name); - res_columns[1]->insert(factory.isCaseInsensitive(name)); + res_columns[0]->insert(dtf_name); + res_columns[1]->insert(factory.isCaseInsensitive(dtf_name)); - if (factory.isAlias(name)) - res_columns[2]->insert(factory.aliasTo(name)); + if (factory.isAlias(dtf_name)) + res_columns[2]->insert(factory.aliasTo(dtf_name)); else res_columns[2]->insertDefault(); } diff --git a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp b/dbms/src/Storages/System/StorageSystemDetachedParts.cpp index dbad9d8b604..617b52d8e5f 100644 --- a/dbms/src/Storages/System/StorageSystemDetachedParts.cpp +++ b/dbms/src/Storages/System/StorageSystemDetachedParts.cpp @@ -52,7 +52,7 @@ protected: /// Create the result. 
Block block = getSampleBlock(); - MutableColumns columns = block.cloneEmptyColumns(); + MutableColumns new_columns = block.cloneEmptyColumns(); while (StoragesInfo info = stream.next()) { @@ -60,19 +60,19 @@ protected: for (auto & p : parts) { size_t i = 0; - columns[i++]->insert(info.database); - columns[i++]->insert(info.table); - columns[i++]->insert(p.partition_id); - columns[i++]->insert(p.getPartName()); - columns[i++]->insert(p.prefix); - columns[i++]->insert(p.min_block); - columns[i++]->insert(p.max_block); - columns[i++]->insert(p.level); + new_columns[i++]->insert(info.database); + new_columns[i++]->insert(info.table); + new_columns[i++]->insert(p.partition_id); + new_columns[i++]->insert(p.getPartName()); + new_columns[i++]->insert(p.prefix); + new_columns[i++]->insert(p.min_block); + new_columns[i++]->insert(p.max_block); + new_columns[i++]->insert(p.level); } } return BlockInputStreams(1, std::make_shared( - block.cloneWithColumns(std::move(columns)))); + block.cloneWithColumns(std::move(new_columns)))); } }; diff --git a/dbms/src/Storages/System/StorageSystemFormats.cpp b/dbms/src/Storages/System/StorageSystemFormats.cpp index f0d97db5a98..158d0a662f2 100644 --- a/dbms/src/Storages/System/StorageSystemFormats.cpp +++ b/dbms/src/Storages/System/StorageSystemFormats.cpp @@ -20,10 +20,10 @@ void StorageSystemFormats::fillData(MutableColumns & res_columns, const Context const auto & formats = FormatFactory::instance().getAllFormats(); for (const auto & pair : formats) { - const auto & [name, creators] = pair; + const auto & [format_name, creators] = pair; UInt64 has_input_format(creators.inout_creator != nullptr || creators.input_processor_creator != nullptr); UInt64 has_output_format(creators.output_creator != nullptr || creators.output_processor_creator != nullptr); - res_columns[0]->insert(name); + res_columns[0]->insert(format_name); res_columns[1]->insert(has_input_format); res_columns[2]->insert(has_output_format); } diff --git 
a/dbms/src/Storages/System/StorageSystemFunctions.cpp b/dbms/src/Storages/System/StorageSystemFunctions.cpp index c9601373df3..e46b7007dc2 100644 --- a/dbms/src/Storages/System/StorageSystemFunctions.cpp +++ b/dbms/src/Storages/System/StorageSystemFunctions.cpp @@ -38,16 +38,16 @@ void StorageSystemFunctions::fillData(MutableColumns & res_columns, const Contex { const auto & functions_factory = FunctionFactory::instance(); const auto & function_names = functions_factory.getAllRegisteredNames(); - for (const auto & name : function_names) + for (const auto & function_name : function_names) { - fillRow(res_columns, name, UInt64(0), functions_factory); + fillRow(res_columns, function_name, UInt64(0), functions_factory); } const auto & aggregate_functions_factory = AggregateFunctionFactory::instance(); const auto & aggregate_function_names = aggregate_functions_factory.getAllRegisteredNames(); - for (const auto & name : aggregate_function_names) + for (const auto & function_name : aggregate_function_names) { - fillRow(res_columns, name, UInt64(1), aggregate_functions_factory); + fillRow(res_columns, function_name, UInt64(1), aggregate_functions_factory); } } } diff --git a/dbms/src/Storages/System/StorageSystemModels.cpp b/dbms/src/Storages/System/StorageSystemModels.cpp index b595df1e1b2..2db690ea4c3 100644 --- a/dbms/src/Storages/System/StorageSystemModels.cpp +++ b/dbms/src/Storages/System/StorageSystemModels.cpp @@ -30,9 +30,9 @@ void StorageSystemModels::fillData(MutableColumns & res_columns, const Context & const auto & external_models = context.getExternalModels(); auto load_results = external_models.getCurrentLoadResults(); - for (const auto & [name, load_result] : load_results) + for (const auto & [model_name, load_result] : load_results) { - res_columns[0]->insert(name); + res_columns[0]->insert(model_name); res_columns[1]->insert(static_cast(load_result.status)); res_columns[2]->insert(load_result.origin); diff --git 
a/dbms/src/Storages/System/StorageSystemParts.cpp b/dbms/src/Storages/System/StorageSystemParts.cpp index f8fffd2d9c9..65d17f096c3 100644 --- a/dbms/src/Storages/System/StorageSystemParts.cpp +++ b/dbms/src/Storages/System/StorageSystemParts.cpp @@ -14,8 +14,8 @@ namespace DB { -StorageSystemParts::StorageSystemParts(const std::string & name) - : StorageSystemPartsBase(name, +StorageSystemParts::StorageSystemParts(const std::string & name_) + : StorageSystemPartsBase(name_, { {"partition", std::make_shared()}, {"name", std::make_shared()}, @@ -55,7 +55,7 @@ StorageSystemParts::StorageSystemParts(const std::string & name) { } -void StorageSystemParts::processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) +void StorageSystemParts::processNextStorage(MutableColumns & columns_, const StoragesInfo & info, bool has_state_column) { using State = MergeTreeDataPart::State; MergeTreeData::DataPartStateVector all_parts_state; @@ -74,56 +74,56 @@ void StorageSystemParts::processNextStorage(MutableColumns & columns, const Stor { WriteBufferFromOwnString out; part->partition.serializeText(*info.data, out, format_settings); - columns[i++]->insert(out.str()); + columns_[i++]->insert(out.str()); } - columns[i++]->insert(part->name); - columns[i++]->insert(part_state == State::Committed); - columns[i++]->insert(part->getMarksCount()); - columns[i++]->insert(part->rows_count); - columns[i++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); - columns[i++]->insert(columns_size.data_compressed); - columns[i++]->insert(columns_size.data_uncompressed); - columns[i++]->insert(columns_size.marks); - columns[i++]->insert(static_cast(part->modification_time)); + columns_[i++]->insert(part->name); + columns_[i++]->insert(part_state == State::Committed); + columns_[i++]->insert(part->getMarksCount()); + columns_[i++]->insert(part->rows_count); + columns_[i++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); + 
columns_[i++]->insert(columns_size.data_compressed); + columns_[i++]->insert(columns_size.data_uncompressed); + columns_[i++]->insert(columns_size.marks); + columns_[i++]->insert(static_cast(part->modification_time)); time_t remove_time = part->remove_time.load(std::memory_order_relaxed); - columns[i++]->insert(static_cast(remove_time == std::numeric_limits::max() ? 0 : remove_time)); + columns_[i++]->insert(static_cast(remove_time == std::numeric_limits::max() ? 0 : remove_time)); /// For convenience, in returned refcount, don't add references that was due to local variables in this method: all_parts, active_parts. - columns[i++]->insert(static_cast(part.use_count() - 1)); + columns_[i++]->insert(static_cast(part.use_count() - 1)); - columns[i++]->insert(part->getMinDate()); - columns[i++]->insert(part->getMaxDate()); - columns[i++]->insert(part->getMinTime()); - columns[i++]->insert(part->getMaxTime()); - columns[i++]->insert(part->info.partition_id); - columns[i++]->insert(part->info.min_block); - columns[i++]->insert(part->info.max_block); - columns[i++]->insert(part->info.level); - columns[i++]->insert(static_cast(part->info.getDataVersion())); - columns[i++]->insert(part->getIndexSizeInBytes()); - columns[i++]->insert(part->getIndexSizeInAllocatedBytes()); - columns[i++]->insert(part->is_frozen); + columns_[i++]->insert(part->getMinDate()); + columns_[i++]->insert(part->getMaxDate()); + columns_[i++]->insert(part->getMinTime()); + columns_[i++]->insert(part->getMaxTime()); + columns_[i++]->insert(part->info.partition_id); + columns_[i++]->insert(part->info.min_block); + columns_[i++]->insert(part->info.max_block); + columns_[i++]->insert(part->info.level); + columns_[i++]->insert(static_cast(part->info.getDataVersion())); + columns_[i++]->insert(part->getIndexSizeInBytes()); + columns_[i++]->insert(part->getIndexSizeInAllocatedBytes()); + columns_[i++]->insert(part->is_frozen); - columns[i++]->insert(info.database); - columns[i++]->insert(info.table); - 
columns[i++]->insert(info.engine); - columns[i++]->insert(part->getFullPath()); + columns_[i++]->insert(info.database); + columns_[i++]->insert(info.table); + columns_[i++]->insert(info.engine); + columns_[i++]->insert(part->getFullPath()); if (has_state_column) - columns[i++]->insert(part->stateString()); + columns_[i++]->insert(part->stateString()); MinimalisticDataPartChecksums helper; helper.computeTotalChecksums(part->checksums); auto checksum = helper.hash_of_all_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); checksum = helper.hash_of_uncompressed_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); checksum = helper.uncompressed_hash_of_compressed_files; - columns[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); } } diff --git a/dbms/src/Storages/System/StorageSystemParts.h b/dbms/src/Storages/System/StorageSystemParts.h index f0c5071b1c5..eb1ded1c5d6 100644 --- a/dbms/src/Storages/System/StorageSystemParts.h +++ b/dbms/src/Storages/System/StorageSystemParts.h @@ -18,7 +18,7 @@ public: std::string getName() const override { return "SystemParts"; } protected: - explicit StorageSystemParts(const std::string & name); + explicit StorageSystemParts(const std::string & name_); void processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) override; }; diff --git a/dbms/src/Storages/System/StorageSystemPartsBase.cpp b/dbms/src/Storages/System/StorageSystemPartsBase.cpp index 303a8ddd939..7a242e74e75 100644 --- a/dbms/src/Storages/System/StorageSystemPartsBase.cpp +++ 
b/dbms/src/Storages/System/StorageSystemPartsBase.cpp @@ -253,21 +253,21 @@ bool StorageSystemPartsBase::hasColumn(const String & column_name) const StorageSystemPartsBase::StorageSystemPartsBase(std::string name_, NamesAndTypesList && columns_) : name(std::move(name_)) { - ColumnsDescription columns(std::move(columns_)); + ColumnsDescription tmp_columns(std::move(columns_)); auto add_alias = [&](const String & alias_name, const String & column_name) { - ColumnDescription column(alias_name, columns.get(column_name).type, false); + ColumnDescription column(alias_name, tmp_columns.get(column_name).type, false); column.default_desc.kind = ColumnDefaultKind::Alias; column.default_desc.expression = std::make_shared(column_name); - columns.add(column); + tmp_columns.add(column); }; /// Add aliases for old column names for backwards compatibility. add_alias("bytes", "bytes_on_disk"); add_alias("marks_size", "marks_bytes"); - setColumns(columns); + setColumns(tmp_columns); } } diff --git a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp index ab688b514e7..09229d79665 100644 --- a/dbms/src/Storages/System/StorageSystemPartsColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemPartsColumns.cpp @@ -15,8 +15,8 @@ namespace DB { -StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name) - : StorageSystemPartsBase(name, +StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name_) + : StorageSystemPartsBase(name_, { {"partition", std::make_shared()}, {"name", std::make_shared()}, @@ -58,7 +58,7 @@ StorageSystemPartsColumns::StorageSystemPartsColumns(const std::string & name) { } -void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, const StoragesInfo & info, bool has_state_column) +void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns_, const StoragesInfo & info, bool has_state_column) { /// Prepare information about columns in 
storage. struct ColumnInfo @@ -106,59 +106,59 @@ void StorageSystemPartsColumns::processNextStorage(MutableColumns & columns, con { WriteBufferFromOwnString out; part->partition.serializeText(*info.data, out, format_settings); - columns[j++]->insert(out.str()); + columns_[j++]->insert(out.str()); } - columns[j++]->insert(part->name); - columns[j++]->insert(part_state == State::Committed); - columns[j++]->insert(part->getMarksCount()); + columns_[j++]->insert(part->name); + columns_[j++]->insert(part_state == State::Committed); + columns_[j++]->insert(part->getMarksCount()); - columns[j++]->insert(part->rows_count); - columns[j++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); - columns[j++]->insert(columns_size.data_compressed); - columns[j++]->insert(columns_size.data_uncompressed); - columns[j++]->insert(columns_size.marks); - columns[j++]->insert(UInt64(part->modification_time)); - columns[j++]->insert(UInt64(part->remove_time.load(std::memory_order_relaxed))); + columns_[j++]->insert(part->rows_count); + columns_[j++]->insert(part->bytes_on_disk.load(std::memory_order_relaxed)); + columns_[j++]->insert(columns_size.data_compressed); + columns_[j++]->insert(columns_size.data_uncompressed); + columns_[j++]->insert(columns_size.marks); + columns_[j++]->insert(UInt64(part->modification_time)); + columns_[j++]->insert(UInt64(part->remove_time.load(std::memory_order_relaxed))); - columns[j++]->insert(UInt64(use_count)); + columns_[j++]->insert(UInt64(use_count)); - columns[j++]->insert(min_date); - columns[j++]->insert(max_date); - columns[j++]->insert(part->info.partition_id); - columns[j++]->insert(part->info.min_block); - columns[j++]->insert(part->info.max_block); - columns[j++]->insert(part->info.level); - columns[j++]->insert(UInt64(part->info.getDataVersion())); - columns[j++]->insert(index_size_in_bytes); - columns[j++]->insert(index_size_in_allocated_bytes); + columns_[j++]->insert(min_date); + columns_[j++]->insert(max_date); + 
columns_[j++]->insert(part->info.partition_id); + columns_[j++]->insert(part->info.min_block); + columns_[j++]->insert(part->info.max_block); + columns_[j++]->insert(part->info.level); + columns_[j++]->insert(UInt64(part->info.getDataVersion())); + columns_[j++]->insert(index_size_in_bytes); + columns_[j++]->insert(index_size_in_allocated_bytes); - columns[j++]->insert(info.database); - columns[j++]->insert(info.table); - columns[j++]->insert(info.engine); - columns[j++]->insert(part->getFullPath()); - columns[j++]->insert(column.name); - columns[j++]->insert(column.type->getName()); + columns_[j++]->insert(info.database); + columns_[j++]->insert(info.table); + columns_[j++]->insert(info.engine); + columns_[j++]->insert(part->getFullPath()); + columns_[j++]->insert(column.name); + columns_[j++]->insert(column.type->getName()); auto column_info_it = columns_info.find(column.name); if (column_info_it != columns_info.end()) { - columns[j++]->insert(column_info_it->second.default_kind); - columns[j++]->insert(column_info_it->second.default_expression); + columns_[j++]->insert(column_info_it->second.default_kind); + columns_[j++]->insert(column_info_it->second.default_expression); } else { - columns[j++]->insertDefault(); - columns[j++]->insertDefault(); + columns_[j++]->insertDefault(); + columns_[j++]->insertDefault(); } ColumnSize column_size = part->getColumnSize(column.name, *column.type); - columns[j++]->insert(column_size.data_compressed + column_size.marks); - columns[j++]->insert(column_size.data_compressed); - columns[j++]->insert(column_size.data_uncompressed); - columns[j++]->insert(column_size.marks); + columns_[j++]->insert(column_size.data_compressed + column_size.marks); + columns_[j++]->insert(column_size.data_compressed); + columns_[j++]->insert(column_size.data_uncompressed); + columns_[j++]->insert(column_size.marks); if (has_state_column) - columns[j++]->insert(part->stateString()); + columns_[j++]->insert(part->stateString()); } } } diff --git 
a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp index 4ac81521b0b..55ca3274278 100644 --- a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp +++ b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp @@ -108,8 +108,8 @@ void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, const Array parts_to_merge; parts_to_merge.reserve(entry.source_parts.size()); - for (const auto & name : entry.source_parts) - parts_to_merge.push_back(name); + for (const auto & part_name : entry.source_parts) + parts_to_merge.push_back(part_name); size_t col_num = 0; res_columns[col_num++]->insert(database); diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp index 367595e9742..65b1dc41879 100644 --- a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp +++ b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp @@ -12,9 +12,9 @@ NamesAndTypesList StorageSystemTableFunctions::getNamesAndTypes() void StorageSystemTableFunctions::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const { const auto & functions_names = TableFunctionFactory::instance().getAllRegisteredNames(); - for (const auto & name : functions_names) + for (const auto & function_name : functions_names) { - res_columns[0]->insert(name); + res_columns[0]->insert(function_name); } } diff --git a/dbms/src/Storages/System/StorageSystemTables.cpp b/dbms/src/Storages/System/StorageSystemTables.cpp index e962b9883b3..113c09165d9 100644 --- a/dbms/src/Storages/System/StorageSystemTables.cpp +++ b/dbms/src/Storages/System/StorageSystemTables.cpp @@ -65,12 +65,12 @@ class TablesBlockInputStream : public IBlockInputStream { public: TablesBlockInputStream( - std::vector columns_mask, - Block header, - UInt64 max_block_size, - ColumnPtr databases, - const Context & context) - : columns_mask(std::move(columns_mask)), 
header(std::move(header)), max_block_size(max_block_size), databases(std::move(databases)), context(context) {} + std::vector columns_mask_, + Block header_, + UInt64 max_block_size_, + ColumnPtr databases_, + const Context & context_) + : columns_mask(std::move(columns_mask_)), header(std::move(header_)), max_block_size(max_block_size_), databases(std::move(databases_)), context(context_) {} String getName() const override { return "Tables"; } Block getHeader() const override { return header; } diff --git a/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp b/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp index c17676bc655..e61ab6279a2 100644 --- a/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp +++ b/dbms/src/Storages/tests/gtest_transform_query_for_external_database.cpp @@ -69,3 +69,10 @@ TEST(TransformQueryForExternalDatabase, Like) "SELECT \"column\" FROM \"test\".\"table\" WHERE \"column\" NOT LIKE 'w%rld'", state().context, state().columns); } + +TEST(TransformQueryForExternalDatabase, Substring) +{ + check("SELECT column FROM test.table WHERE left(column, 10) = RIGHT(column, 10) AND SUBSTRING(column FROM 1 FOR 2) = 'Hello'", + "SELECT \"column\" FROM \"test\".\"table\"", + state().context, state().columns); +} diff --git a/dbms/src/Storages/tests/remove_symlink_directory.cpp b/dbms/src/Storages/tests/remove_symlink_directory.cpp index 8098ee5dc32..b455357863e 100644 --- a/dbms/src/Storages/tests/remove_symlink_directory.cpp +++ b/dbms/src/Storages/tests/remove_symlink_directory.cpp @@ -22,7 +22,7 @@ try Poco::File("./test_dir/file").createFile(); if (0 != symlink("./test_dir", "./test_link")) - DB::throwFromErrno("Cannot create symlink", DB::ErrorCodes::SYSTEM_ERROR); + DB::throwFromErrnoWithPath("Cannot create symlink", "./test_link", DB::ErrorCodes::SYSTEM_ERROR); Poco::File link("./test_link"); link.renameTo("./test_link2"); diff --git a/dbms/src/TableFunctions/TableFunctionRemote.cpp 
b/dbms/src/TableFunctions/TableFunctionRemote.cpp index c4e0cd1866a..9d0a8024c0e 100644 --- a/dbms/src/TableFunctions/TableFunctionRemote.cpp +++ b/dbms/src/TableFunctions/TableFunctionRemote.cpp @@ -67,7 +67,7 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C } else { - if (!getIdentifierName(args[arg_num], cluster_name)) + if (!tryGetIdentifierNameInto(args[arg_num], cluster_name)) cluster_description = getStringLiteral(*args[arg_num], "Hosts pattern"); } ++arg_num; @@ -180,8 +180,8 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C } -TableFunctionRemote::TableFunctionRemote(const std::string & name, bool secure) - : name{name}, secure{secure} +TableFunctionRemote::TableFunctionRemote(const std::string & name_, bool secure_) + : name{name_}, secure{secure_} { is_cluster_function = name == "cluster"; diff --git a/dbms/src/TableFunctions/TableFunctionRemote.h b/dbms/src/TableFunctions/TableFunctionRemote.h index c9a98cbbc16..ef2e5cf190c 100644 --- a/dbms/src/TableFunctions/TableFunctionRemote.h +++ b/dbms/src/TableFunctions/TableFunctionRemote.h @@ -16,7 +16,7 @@ namespace DB class TableFunctionRemote : public ITableFunction { public: - TableFunctionRemote(const std::string & name, bool secure = false); + TableFunctionRemote(const std::string & name_, bool secure_ = false); std::string getName() const override { return name; } diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/dbms/tests/integration/test_storage_kafka/test.py index 9be725d33b7..f066dc34a7f 100644 --- a/dbms/tests/integration/test_storage_kafka/test.py +++ b/dbms/tests/integration/test_storage_kafka/test.py @@ -122,6 +122,7 @@ def kafka_setup_teardown(): # Tests +@pytest.mark.timeout(60) def test_kafka_settings_old_syntax(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -136,14 +137,15 @@ def test_kafka_settings_old_syntax(kafka_cluster): kafka_produce('old', messages) result = 
'' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) -@pytest.mark.skip(reason="fails for some reason") + +@pytest.mark.timeout(60) def test_kafka_settings_new_syntax(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -171,14 +173,15 @@ def test_kafka_settings_new_syntax(kafka_cluster): kafka_produce('new', messages) result = '' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) +@pytest.mark.timeout(60) def test_kafka_csv_with_delimiter(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -196,14 +199,15 @@ def test_kafka_csv_with_delimiter(kafka_cluster): kafka_produce('csv', messages) result = '' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) +@pytest.mark.timeout(60) def test_kafka_tsv_with_delimiter(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -221,14 +225,15 @@ def test_kafka_tsv_with_delimiter(kafka_cluster): kafka_produce('tsv', messages) result = '' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) +@pytest.mark.timeout(60) def test_kafka_json_without_delimiter(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, 
value UInt64) @@ -250,14 +255,15 @@ def test_kafka_json_without_delimiter(kafka_cluster): kafka_produce('json', [messages]) result = '' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) +@pytest.mark.timeout(60) def test_kafka_protobuf(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value String) @@ -274,14 +280,15 @@ def test_kafka_protobuf(kafka_cluster): kafka_produce_protobuf_messages('pb', 21, 29) result = '' - for i in range(50): - result += instance.query('SELECT * FROM test.kafka') + while True: + result += instance.query('SELECT * FROM test.kafka', ignore_error=True) if kafka_check_result(result): break - time.sleep(0.5) + kafka_check_result(result, True) +@pytest.mark.timeout(60) def test_kafka_materialized_view(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; @@ -305,19 +312,20 @@ def test_kafka_materialized_view(kafka_cluster): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce('mv', messages) - for i in range(50): + while True: result = instance.query('SELECT * FROM test.view') if kafka_check_result(result): break - time.sleep(0.5) - kafka_check_result(result, True) instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') -@pytest.mark.skip(reason="Hungs") + kafka_check_result(result, True) + + +@pytest.mark.timeout(300) def test_kafka_flush_on_big_message(kafka_cluster): # Create batchs of messages of size ~100Kb kafka_messages = 1000 @@ -354,15 +362,20 @@ def test_kafka_flush_on_big_message(kafka_cluster): except kafka.errors.GroupCoordinatorNotAvailableError: continue - for i in range(50): + while True: result = instance.query('SELECT count() FROM test.view') if int(result) == kafka_messages*batch_messages: break - time.sleep(0.5) + + instance.query(''' + DROP TABLE 
test.consumer; + DROP TABLE test.view; + ''') assert int(result) == kafka_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result) +@pytest.mark.timeout(60) def test_kafka_virtual_columns(kafka_cluster): instance.query(''' CREATE TABLE test.kafka (key UInt64, value UInt64) @@ -384,14 +397,15 @@ def test_kafka_virtual_columns(kafka_cluster): kafka_produce('virt1', [messages]) result = '' - for i in range(50): - result += instance.query('SELECT _key, key, _topic, value, _offset FROM test.kafka') + while True: + result += instance.query('SELECT _key, key, _topic, value, _offset FROM test.kafka', ignore_error=True) if kafka_check_result(result, False, 'test_kafka_virtual1.reference'): break - time.sleep(0.5) + kafka_check_result(result, True, 'test_kafka_virtual1.reference') +@pytest.mark.timeout(60) def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): instance.query(''' DROP TABLE IF EXISTS test.view; @@ -415,18 +429,18 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce('virt2', messages) - for i in range(50): + while True: result = instance.query('SELECT kafka_key, key, topic, value, offset FROM test.view') if kafka_check_result(result, False, 'test_kafka_virtual2.reference'): break - time.sleep(0.5) - kafka_check_result(result, True, 'test_kafka_virtual2.reference') instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') + kafka_check_result(result, True, 'test_kafka_virtual2.reference') + if __name__ == '__main__': cluster.start() diff --git a/dbms/tests/performance/linear_regression.xml b/dbms/tests/performance/linear_regression.xml index 409f120cd42..a04683c2b60 100644 --- a/dbms/tests/performance/linear_regression.xml +++ b/dbms/tests/performance/linear_regression.xml @@ -19,17 +19,17 @@ DROP TABLE IF EXISTS test_model CREATE TABLE test_model engine = Memory as select stochasticLinearRegressionState(0.0001)(Age, Income, 
ParamPrice, Robotness, RefererHash) as state from test.hits - + WITH (SELECT stochasticLinearRegressionState(0.0001, 0, 15)(Age, Income, ParamPrice, Robotness, RefererHash) FROM test.hits) AS model SELECT 1 SELECT stochasticLinearRegression(Age, Income, ParamPrice, Robotness, RefererHash) FROM test.hits - + WITH (SELECT stochasticLinearRegressionState(0.0001, 0, 15, 'Momentum')(Age, Income, ParamPrice, Robotness, RefererHash) FROM test.hits) AS model SELECT 1 - + WITH (SELECT stochasticLinearRegressionState(0.0001, 0, 15, 'Nesterov')(Age, Income, ParamPrice, Robotness, RefererHash) FROM test.hits) AS model SELECT 1 - + with (SELECT state FROM test_model) as model select evalMLMethod(model, Income, ParamPrice, Robotness, RefererHash) from test.hits DROP TABLE IF EXISTS test_model diff --git a/dbms/tests/performance/order_by_decimals.xml b/dbms/tests/performance/order_by_decimals.xml new file mode 100644 index 00000000000..ad6937cd1d6 --- /dev/null +++ b/dbms/tests/performance/order_by_decimals.xml @@ -0,0 +1,30 @@ + + + sorting + comparison + + + loop + + + + 5 + 10000 + + + 50 + 60000 + + + + SELECT toInt32(number) AS n FROM numbers(1000000) ORDER BY n DESC + SELECT toDecimal32(number, 0) AS n FROM numbers(1000000) ORDER BY n + + SELECT toDecimal32(number, 0) AS n FROM numbers(1000000) ORDER BY n DESC + SELECT toDecimal64(number, 8) AS n FROM numbers(1000000) ORDER BY n DESC + SELECT toDecimal128(number, 10) AS n FROM numbers(1000000) ORDER BY n DESC + + + + + diff --git a/dbms/tests/queries/0_stateless/00051_any_inner_join.sql b/dbms/tests/queries/0_stateless/00051_any_inner_join.sql index 986c798d763..566b5ad526b 100644 --- a/dbms/tests/queries/0_stateless/00051_any_inner_join.sql +++ b/dbms/tests/queries/0_stateless/00051_any_inner_join.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT a.*, b.* FROM ( SELECT number AS k FROM system.numbers LIMIT 10 diff --git a/dbms/tests/queries/0_stateless/00098_l_union_all.sql 
b/dbms/tests/queries/0_stateless/00098_l_union_all.sql index 9a301297786..414baba92fa 100644 --- a/dbms/tests/queries/0_stateless/00098_l_union_all.sql +++ b/dbms/tests/queries/0_stateless/00098_l_union_all.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) ANY INNER JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) USING (a) ORDER BY a,b,c,d ASC; SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) ALL LEFT JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) USING (a) ORDER BY a,b,c,d ASC; SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) ALL LEFT JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) USING a,b ORDER BY a,b,c,d ASC; diff --git a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql b/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql index af4e8e79ff6..b6e1a260d25 100644 --- a/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql +++ b/dbms/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT number, number / 2 AS n, j1, j2 FROM remote('127.0.0.{2,3}', system.numbers) ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) USING n LIMIT 10; SELECT dummy + 2 AS number, number / 2 AS n, j1, j2 FROM remote('127.0.0.{2,3}', system.one) ANY INNER JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) USING n LIMIT 10; SELECT number, number / 2 AS n, j1, j2 FROM remote('127.0.0.{2,3}', system.numbers) GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) USING n LIMIT 10; diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql b/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql index 
4938a633810..8e87f49a7a2 100644 --- a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql +++ b/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + DROP TABLE IF EXISTS series; CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory; diff --git a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql b/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql index 362d300f1d6..f51722f7f33 100644 --- a/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql +++ b/dbms/tests/queries/0_stateless/00181_aggregate_functions_statistics_stable.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + DROP TABLE IF EXISTS series; CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory; diff --git a/dbms/tests/queries/0_stateless/00203_full_join.sql b/dbms/tests/queries/0_stateless/00203_full_join.sql index ace766ab324..f664ae3bd95 100644 --- a/dbms/tests/queries/0_stateless/00203_full_join.sql +++ b/dbms/tests/queries/0_stateless/00203_full_join.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT k, x, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; SELECT k, x FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; SELECT k, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; @@ -20,8 +22,11 @@ INSERT INTO t1_00203 VALUES (1, 2, 3, 'aaa'), (2, 3, 4, 'bbb'); INSERT INTO t2_00203 VALUES ('ccc', 4, 3, 2), ('ddd', 7, 6, 5); SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY FULL JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY 
k1, k2, k3; - SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; +SET any_join_distinct_right_table_keys = 0; +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY FULL JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; -- { serverError 48 } +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2 AS k2_alias) ORDER BY k1, k2, k3; -- { serverError 48 } + DROP TABLE t1_00203; DROP TABLE t2_00203; diff --git a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference b/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference index 5b1dd0e97cf..53743ca209b 100644 --- a/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference +++ b/dbms/tests/queries/0_stateless/00209_insert_select_extremes.reference @@ -1,11 +1 @@ - -1 -1 - -1 - -1 - -1 -1 4 1 1 diff --git a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference b/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference index 016f3290af0..e69de29bb2d 100644 --- a/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference +++ b/dbms/tests/queries/0_stateless/00210_insert_select_extremes_http.reference @@ -1,3 +0,0 @@ - -1 -1 diff --git a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql b/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql index d033d197cf7..9c7485992f6 100644 --- a/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql +++ b/dbms/tests/queries/0_stateless/00219_full_right_join_column_order.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b) USING a, b; SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b) USING b, a; diff --git a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql 
b/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql index 2c13433a5c5..45dc76cb41d 100644 --- a/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql +++ b/dbms/tests/queries/0_stateless/00316_rounding_functions_and_empty_block.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT floor((ReferrerTimestamp - InstallTimestamp) / 86400) AS DaysSinceInstallations FROM diff --git a/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql b/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql index 96565ff8430..5550effec2c 100644 --- a/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql +++ b/dbms/tests/queries/0_stateless/00353_join_by_tuple.sql @@ -1 +1,2 @@ +set any_join_distinct_right_table_keys = 1; select a from (select (1, 2) as a) any inner join (select (1, 2) as a) using a; diff --git a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql b/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql index 44506d772a3..fb6b6ccfe34 100644 --- a/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql +++ b/dbms/tests/queries/0_stateless/00370_duplicate_columns_in_subqueries.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + select x, y from (select 1 as x, 2 as y, x, y); select x, y from (select 1 as x, 1 as y, x, y); select x from (select 1 as x, 1 as y, x, y); diff --git a/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql b/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql index df5d3d04a25..1992c3cbcc9 100644 --- a/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql +++ b/dbms/tests/queries/0_stateless/00444_join_use_nulls.sql @@ -1,4 +1,5 @@ SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 1; SELECT k, a, b FROM diff --git a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql b/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql index 58670f014a0..fa5fec58364 100644 --- 
a/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql +++ b/dbms/tests/queries/0_stateless/00445_join_nullable_keys.sql @@ -1,4 +1,5 @@ SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 1; SELECT k, a, b FROM diff --git a/dbms/tests/queries/0_stateless/00550_join_insert_select.sh b/dbms/tests/queries/0_stateless/00550_join_insert_select.sh index 827248d6c7e..168dad8ddc0 100755 --- a/dbms/tests/queries/0_stateless/00550_join_insert_select.sh +++ b/dbms/tests/queries/0_stateless/00550_join_insert_select.sh @@ -14,7 +14,7 @@ INSERT INTO test2_00550 VALUES ('a'); CREATE TABLE test3_00550 ( id String, name String ) ENGINE = StripeLog; INSERT INTO test3_00550 VALUES ('a', 'aaa'); -INSERT INTO test1_00550 SELECT id, name FROM test2_00550 ANY INNER JOIN test3_00550 USING (id); +INSERT INTO test1_00550 SELECT id, name FROM test2_00550 ANY INNER JOIN test3_00550 USING (id) SETTINGS any_join_distinct_right_table_keys=1; INSERT INTO test1_00550 SELECT id, name FROM test2_00550 ANY LEFT OUTER JOIN test3_00550 USING (id); DROP TABLE test1_00550; diff --git a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql b/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql index 1ec82d5f1b4..72ccbd4e58d 100644 --- a/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql +++ b/dbms/tests/queries/0_stateless/00555_right_join_excessive_rows.sql @@ -1,2 +1,3 @@ +SET any_join_distinct_right_table_keys = 1; SET max_block_size = 10; SELECT * FROM (select toUInt64(1) s limit 1) any right join (select number s, s as x from numbers(11)) using (s) ORDER BY s; diff --git a/dbms/tests/queries/0_stateless/00561_storage_join.sql b/dbms/tests/queries/0_stateless/00561_storage_join.sql index f218f9a0bc2..bcbaaded9b0 100644 --- a/dbms/tests/queries/0_stateless/00561_storage_join.sql +++ b/dbms/tests/queries/0_stateless/00561_storage_join.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + drop table IF EXISTS joinbug; CREATE 
TABLE joinbug ( diff --git a/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql b/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql index a53c9ffe4eb..d2fdab7c1c3 100644 --- a/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql +++ b/dbms/tests/queries/0_stateless/00577_full_join_segfault.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT k, a1, b1, a2, b2 FROM (SELECT 0 AS k, 'hello' AS a1, 123 AS b1, a1) ANY FULL OUTER JOIN (SELECT 1 AS k, 'hello' AS a2, 456 AS b2, a2) USING (k) ORDER BY k; SELECT k, a, b FROM (SELECT 0 AS k, 'hello' AS a, 123 AS b, a) ANY FULL OUTER JOIN (SELECT 1 AS k) USING (k) ORDER BY k; diff --git a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql b/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql index 715e43fa96a..07ea077a460 100644 --- a/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql +++ b/dbms/tests/queries/0_stateless/00586_removing_unused_columns_from_subquery.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + DROP TABLE IF EXISTS local_statements; DROP TABLE IF EXISTS statements; diff --git a/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql b/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql index 0c3fb6a3bc3..e0d92b87e5c 100644 --- a/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql +++ b/dbms/tests/queries/0_stateless/00597_push_down_predicate.sql @@ -1,4 +1,5 @@ SET send_logs_level = 'none'; +SET any_join_distinct_right_table_keys = 1; DROP TABLE IF EXISTS test_00597; DROP TABLE IF EXISTS test_view_00597; diff --git a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index eb418277c8f..c8cd8606cc1 100755 --- a/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ 
b/dbms/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -80,7 +80,7 @@ FROM # Check per-thread and per-query ProfileEvents consistency -$CLICKHOUSE_CLIENT $settings -q " +$CLICKHOUSE_CLIENT $settings --any_join_distinct_right_table_keys=1 -q " SELECT PN, PVq, PVt FROM ( SELECT PN, sum(PV) AS PVt diff --git a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql b/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql index f3b664e64cc..6ac119bdf48 100644 --- a/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql +++ b/dbms/tests/queries/0_stateless/00679_replace_asterisk.sql @@ -1,3 +1,5 @@ +set any_join_distinct_right_table_keys = 1; + SELECT * FROM (SELECT 1 AS id, 2 AS value); SELECT * FROM (SELECT 1 AS id, 2 AS value, 3 AS A) ANY INNER JOIN (SELECT 1 AS id, 4 AS values, 5 AS D) USING id; SELECT *, d.* FROM ( SELECT 1 AS id, 2 AS value ) ANY INNER JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; diff --git a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql b/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql index 3d598363684..abf2903d3ea 100644 --- a/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql +++ b/dbms/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql @@ -12,6 +12,7 @@ SYSTEM SYNC REPLICA byte_identical_r2; ALTER TABLE byte_identical_r1 ADD COLUMN y DEFAULT rand(); OPTIMIZE TABLE byte_identical_r1 PARTITION tuple() FINAL; +SET any_join_distinct_right_table_keys = 1; SELECT x, t1.y - t2.y FROM byte_identical_r1 t1 ANY INNER JOIN byte_identical_r2 t2 USING x ORDER BY x; DROP TABLE byte_identical_r1; diff --git a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql index ba746b62c09..5666d362912 100644 --- 
a/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql +++ b/dbms/tests/queries/0_stateless/00819_full_join_wrong_columns_in_block.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT * FROM (SELECT 1 AS a, 'x' AS b) join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) full join (SELECT 1 as a, 'y' as b) using a; @@ -7,3 +9,7 @@ SELECT * FROM (SELECT 1 AS a, 'x' AS b) any join (SELECT 1 as a, 'y' as b) using SELECT * FROM (SELECT 1 AS a, 'x' AS b) any left join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) using a; SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; + +SET any_join_distinct_right_table_keys = 0; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) using a; -- { serverError 48 } +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; -- { serverError 48 } diff --git a/dbms/tests/queries/0_stateless/00829_bitmap_function.reference b/dbms/tests/queries/0_stateless/00829_bitmap_function.reference index bd5d279919b..da1206bab12 100644 --- a/dbms/tests/queries/0_stateless/00829_bitmap_function.reference +++ b/dbms/tests/queries/0_stateless/00829_bitmap_function.reference @@ -59,3 +59,11 @@ 1 0 1 +[] +[] +[1] +[] +[5] +[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33] +[30,31,32,33,100] +[100] diff --git a/dbms/tests/queries/0_stateless/00829_bitmap_function.sql b/dbms/tests/queries/0_stateless/00829_bitmap_function.sql index 19d00c68415..247a9ba3960 100644 --- a/dbms/tests/queries/0_stateless/00829_bitmap_function.sql +++ b/dbms/tests/queries/0_stateless/00829_bitmap_function.sql @@ -177,18 +177,37 @@ select bitmapHasAll(bitmapBuild([ -- bitmapContains: ---- Empty -SELECT 
bitmapContains(bitmapBuild(emptyArrayUInt32()), CAST(0, 'UInt32')); -SELECT bitmapContains(bitmapBuild(emptyArrayUInt16()), CAST(5, 'UInt32')); +SELECT bitmapContains(bitmapBuild(emptyArrayUInt32()), toUInt32(0)); +SELECT bitmapContains(bitmapBuild(emptyArrayUInt16()), toUInt32(5)); ---- Small -select bitmapContains(bitmapBuild([1,5,7,9]),CAST(0, 'UInt32')); -select bitmapContains(bitmapBuild([1,5,7,9]),CAST(9, 'UInt32')); +select bitmapContains(bitmapBuild([1,5,7,9]),toUInt32(0)); +select bitmapContains(bitmapBuild([1,5,7,9]),toUInt32(9)); ---- Large select bitmapContains(bitmapBuild([ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, - 100,200,500]),CAST(100, 'UInt32')); + 100,200,500]),toUInt32(100)); select bitmapContains(bitmapBuild([ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, - 100,200,500]),CAST(101, 'UInt32')); + 100,200,500]),toUInt32(101)); select bitmapContains(bitmapBuild([ 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, - 100,200,500]),CAST(500, 'UInt32')); + 100,200,500]),toUInt32(500)); + +-- bitmapSubsetInRange: +---- Empty +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild(emptyArrayUInt32()), toUInt32(0), toUInt32(10))); +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild(emptyArrayUInt16()), toUInt32(0), toUInt32(10))); +---- Small +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt32(0), toUInt32(4))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt32(10), toUInt32(10))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt32(3), toUInt32(7))); +---- Large +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(0), toUInt32(100))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(30), toUInt32(200))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(100), toUInt32(200))); diff --git a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql b/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql index 3409e67cbfe..f2968654782 100644 --- a/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql +++ b/dbms/tests/queries/0_stateless/00846_join_using_tuple_crash.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT * FROM (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) JOIN (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) USING (a, tup); diff --git a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql b/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql index 4af9deb26db..d809950283d 100644 --- a/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql +++ b/dbms/tests/queries/0_stateless/00848_join_use_nulls_segfault.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + DROP TABLE IF EXISTS t1_00848; DROP TABLE IF EXISTS t2_00848; DROP TABLE IF EXISTS t3_00848; diff --git a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql b/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql index 057188f4f5a..23c41549502 100644 --- a/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql +++ b/dbms/tests/queries/0_stateless/00859_distinct_with_join.sql @@ -1,3 +1,5 @@ +set any_join_distinct_right_table_keys = 1; + drop table if exists fooL; drop table if exists fooR; create table fooL (a Int32, v String) engine = Memory; diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.reference b/dbms/tests/queries/0_stateless/00880_decimal_in_key.reference new 
file mode 100644 index 00000000000..fcd78da1283 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00880_decimal_in_key.reference @@ -0,0 +1,2 @@ +1000000 +1000000 diff --git a/dbms/tests/queries/0_stateless/00880_decimal_in_key.sql b/dbms/tests/queries/0_stateless/00880_decimal_in_key.sql new file mode 100644 index 00000000000..44edf725a41 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00880_decimal_in_key.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (str String, dec Decimal64(8)) ENGINE = MergeTree ORDER BY str; +CREATE TABLE t2 (str String, dec Decimal64(8)) ENGINE = MergeTree ORDER BY dec; + +INSERT INTO t1 SELECT toString(number), toDecimal64(number, 8) FROM system.numbers LIMIT 1000000; +SELECT count() FROM t1; + +INSERT INTO t2 SELECT toString(number), toDecimal64(number, 8) FROM system.numbers LIMIT 1000000; +SELECT count() FROM t2; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql b/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql index a30aee7f61f..22eeb2c51dc 100644 --- a/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql +++ b/dbms/tests/queries/0_stateless/00916_join_using_duplicate_columns.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 1 AS x) USING x; SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 2 AS x) USING x; diff --git a/dbms/tests/queries/0_stateless/00927_table_filter.sql b/dbms/tests/queries/0_stateless/00927_table_filter.sql index 8fef82c55c8..ff9703be6cd 100644 --- a/dbms/tests/queries/0_stateless/00927_table_filter.sql +++ b/dbms/tests/queries/0_stateless/00927_table_filter.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + DROP TABLE IF EXISTS test.filtered_table1; DROP TABLE IF EXISTS test.filtered_table2; DROP TABLE IF EXISTS test.filtered_table3; diff --git 
a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql b/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql index 94fc3042f42..54741465a1d 100644 --- a/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql +++ b/dbms/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql @@ -1,3 +1,4 @@ +SET any_join_distinct_right_table_keys = 1; SET join_use_nulls = 1; SELECT number FROM system.numbers ANY INNER JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) USING (number) LIMIT 1; -SELECT number FROM system.numbers ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) USING (number) LIMIT 1; \ No newline at end of file +SELECT number FROM system.numbers ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) USING (number) LIMIT 1; diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference b/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference new file mode 100644 index 00000000000..7d9895ef9f3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.reference @@ -0,0 +1,6 @@ +7777 +7777 +7777 +7777 +7777.000 +7777.000 diff --git a/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql b/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql new file mode 100644 index 00000000000..9b3bdf91573 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00979_toFloat_monotonicity.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS test3; + +CREATE TABLE test1 (n UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 1; +CREATE TABLE test2 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; +CREATE TABLE test3 (d Decimal(4, 3)) ENGINE = MergeTree ORDER BY d SETTINGS index_granularity = 1; + +INSERT INTO test1 SELECT * FROM numbers(10000); +SELECT n FROM test1 WHERE toFloat64(n) = 7777.0 SETTINGS max_rows_to_read = 2; +SELECT 
n FROM test1 WHERE toFloat32(n) = 7777.0 SETTINGS max_rows_to_read = 2; + +INSERT INTO test2 SELECT toString(number) FROM numbers(10000); +SELECT s FROM test2 WHERE toFloat64(s) = 7777.0; +SELECT s FROM test2 WHERE toFloat32(s) = 7777.0; + +INSERT INTO test3 SELECT toDecimal64(number, 3) FROM numbers(10000); +SELECT d FROM test3 WHERE toFloat64(d) = 7777.0 SETTINGS max_rows_to_read = 2; +SELECT d FROM test3 WHERE toFloat32(d) = 7777.0 SETTINGS max_rows_to_read = 2; + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE test3; diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference b/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference new file mode 100644 index 00000000000..be6e399c4d9 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.reference @@ -0,0 +1,7 @@ +1.00 +1.00 +1.00 +1.00 +1.00 +1.00 +1.00 diff --git a/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql b/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql new file mode 100644 index 00000000000..f71214232ba --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_crash_nullable_decimal.sql @@ -0,0 +1,8 @@ +select arrayReduce('median', [toDecimal32OrNull('1', 2)]); +select arrayReduce('median', [toDecimal64OrNull('1', 2)]); +select arrayReduce('median', [toDecimal128OrZero('1', 2)]); +select arrayReduce('sum', [toDecimal128OrNull('1', 2)]); + +select arrayReduce('median', [toDecimal128OrNull('1', 2)]); +select arrayReduce('quantile(0.2)', [toDecimal128OrNull('1', 2)]); +select arrayReduce('medianExact', [toDecimal128OrNull('1', 2)]); diff --git a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference b/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.reference @@ -0,0 +1 @@ +1 diff --git 
a/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql b/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql new file mode 100644 index 00000000000..cccc4a81038 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00980_skip_unused_shards_without_sharding_key.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t_local; +DROP TABLE IF EXISTS t_distr; + +CREATE TABLE t_local (a Int) ENGINE = Memory; +CREATE TABLE t_distr (a Int) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 't_local'); + +INSERT INTO t_local VALUES (1), (2); +SET optimize_skip_unused_shards = 1; +SELECT * FROM t_distr WHERE a = 1; + +DROP table t_local; +DROP table t_distr; diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.reference b/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.reference new file mode 100644 index 00000000000..a7ec77dc030 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.reference @@ -0,0 +1 @@ +default merge_ab x UInt8 0 0 0 0 0 0 0 diff --git a/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.sql b/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.sql new file mode 100644 index 00000000000..476377b4ddf --- /dev/null +++ b/dbms/tests/queries/0_stateless/00981_no_virtual_columns_in_system_columns.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS merge_a; +DROP TABLE IF EXISTS merge_b; +DROP TABLE IF EXISTS merge_ab; + +CREATE TABLE merge_a (x UInt8) ENGINE = StripeLog; +CREATE TABLE merge_b (x UInt8) ENGINE = StripeLog; +CREATE TABLE merge_ab AS merge(currentDatabase(), '^merge_[ab]$'); + +SELECT * FROM system.columns WHERE database = currentDatabase() AND table = 'merge_ab'; + +DROP TABLE merge_a; +DROP TABLE merge_b; +DROP TABLE merge_ab; diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference 
b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql new file mode 100644 index 00000000000..7192642bcde --- /dev/null +++ b/dbms/tests/queries/0_stateless/00982_low_cardinality_setting_in_mv.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mat_view; + +CREATE TABLE test1 (a LowCardinality(String)) ENGINE=MergeTree() ORDER BY a; +CREATE TABLE test2 (a UInt64) engine=MergeTree() ORDER BY a; +CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') FROM test1; + +DROP TABLE test_mv; +DROP TABLE test1; +DROP TABLE test2; diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference b/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql b/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql new file mode 100644 index 00000000000..7e138df20f5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00983_summing_merge_tree_not_an_identifier.sql @@ -0,0 +1,13 @@ +CREATE TABLE xx +( + `date` Date, + `id` Int64, + `clicks` Int64, + `price` Float64, + `spend` Float64 +) +ENGINE = SummingMergeTree([price, spend]) +PARTITION BY toYYYYMM(date) +ORDER BY id +SAMPLE BY id +SETTINGS index_granularity = 8192; -- { serverError 223 } diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql b/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql index f24a4221779..9613d3b8f67 100644 --- a/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql +++ b/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql @@ -1,3 +1,5 @@ +SET 
any_join_distinct_right_table_keys = 1; + SELECT loyalty, count() diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql index f009a856ba9..d3e73faa7be 100644 --- a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql +++ b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql @@ -1,3 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + USE test; DROP TABLE IF EXISTS join; diff --git a/dbms/tests/queries/1_stateful/00074_full_join.sql b/dbms/tests/queries/1_stateful/00074_full_join.sql index 44c0987fd61..4497d82a666 100644 --- a/dbms/tests/queries/1_stateful/00074_full_join.sql +++ b/dbms/tests/queries/1_stateful/00074_full_join.sql @@ -1,3 +1,5 @@ +set any_join_distinct_right_table_keys = 1; + SELECT CounterID, hits, diff --git a/docs/en/data_types/enum.md b/docs/en/data_types/enum.md index 754b6651f56..247ec070190 100644 --- a/docs/en/data_types/enum.md +++ b/docs/en/data_types/enum.md @@ -1,26 +1,29 @@ +# Enum -# Enum8, Enum16 +Enumerated type storing pairs of the `'string' = integer` format. -Includes the `Enum8` and `Enum16` types. `Enum` saves the finite set of pairs of `'string' = integer`. In ClickHouse, all operations with the `Enum` data type are performed as if value contains integers, although the user is working with string constants. This is more effective in terms of performance than working with the `String` data type. +ClickHouse supports: -- `Enum8` is described by pairs of `'String' = Int8`. -- `Enum16` is described by pairs of `'String' = Int16`. +- 8-bit `Enum`. It can contain up to 256 values with enumeration of `[-128, 127]`. +- 16-bit `Enum`. It can contain up to 65536 values with enumeration of `[-32768, 32767]`. + +ClickHouse automatically chooses a type for `Enum` at data insertion. Also, you can use `Enum8` or `Enum16` types to be sure in size of storage. 
## Usage examples Here we create a table with an `Enum8('hello' = 1, 'world' = 2)` type column: -``` +```sql CREATE TABLE t_enum ( - x Enum8('hello' = 1, 'world' = 2) + x Enum('hello' = 1, 'world' = 2) ) ENGINE = TinyLog ``` -This column `x` can only store the values that are listed in the type definition: `'hello'` or `'world'`. If you try to save any other value, ClickHouse will generate an exception. +This column `x` can only store the values that are listed in the type definition: `'hello'` or `'world'`. If you try to save any other value, ClickHouse will generate an exception. ClickHouse automatically chooses the 8-bit size for enumeration of this `Enum`. -``` +```sql :) INSERT INTO t_enum VALUES ('hello'), ('world'), ('hello') INSERT INTO t_enum VALUES @@ -35,12 +38,12 @@ INSERT INTO t_enum VALUES Exception on client: -Code: 49. DB::Exception: Unknown element 'a' for type Enum8('hello' = 1, 'world' = 2) +Code: 49. DB::Exception: Unknown element 'a' for type Enum('hello' = 1, 'world' = 2) ``` When you query data from the table, ClickHouse outputs the string values from `Enum`. -``` +```sql SELECT * FROM t_enum ┌─x─────┐ @@ -52,7 +55,7 @@ SELECT * FROM t_enum If you need to see the numeric equivalents of the rows, you must cast the `Enum` value to integer type. -``` +```sql SELECT CAST(x, 'Int8') FROM t_enum ┌─CAST(x, 'Int8')─┐ @@ -64,12 +67,12 @@ SELECT CAST(x, 'Int8') FROM t_enum To create an Enum value in a query, you also need to use `CAST`. 
-``` -SELECT toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')) +```sql +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)')) -┌─toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))─┐ -│ Enum8('a' = 1, 'b' = 2) │ -└──────────────────────────────────────────────────────┘ +┌─toTypeName(CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'))─┐ +│ Enum8('a' = 1, 'b' = 2) │ +└─────────────────────────────────────────────────────┘ ``` ## General rules and usage diff --git a/docs/en/getting_started/example_datasets/metrica.md b/docs/en/getting_started/example_datasets/metrica.md index c26332ad563..75741ba0b54 100644 --- a/docs/en/getting_started/example_datasets/metrica.md +++ b/docs/en/getting_started/example_datasets/metrica.md @@ -1,5 +1,5 @@ # Anonymized Yandex.Metrica Data -Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. Each of the tables can be downloaded as a compressed `tsv.xz` file or as prepared partitions. +Dataset consists of two tables containing anonymized data about hits (`hits_v1`) and visits (`visits_v1`) of Yandex.Metrica. Each of the tables can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at `https://clickhouse-datasets.s3.yandex.net/hits/tsv/hits_100m_obfuscated_v1.tsv.xz` and as prepared partitions at `https://clickhouse-datasets.s3.yandex.net/hits/partitions/hits_100m_obfuscated_v1.tar.xz`. ## Obtaining Tables from Prepared Partitions **Download and import hits:** diff --git a/docs/en/operations/monitoring.md b/docs/en/operations/monitoring.md index f90f647ccea..15a233079f7 100644 --- a/docs/en/operations/monitoring.md +++ b/docs/en/operations/monitoring.md @@ -21,7 +21,7 @@ It is highly recommended to set up monitoring for: ClickHouse server has embedded instruments for self-state monitoring. -To track server events use server logs. 
See the [logger](#server_settings-logger) section of the configuration file. +To track server events use server logs. See the [logger](server_settings/settings.md#server_settings-logger) section of the configuration file. ClickHouse collects: @@ -30,7 +30,7 @@ ClickHouse collects: You can find metrics in the [system.metrics](#system_tables-metrics), [system.events](#system_tables-events), and [system.asynchronous_metrics](#system_tables-asynchronous_metrics) tables. -You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server_settings/settings.md#server_settings-graphite) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Graphite by following their official guide https://graphite.readthedocs.io/en/latest/install.html. +You can configure ClickHouse to export metrics to [Graphite](https://github.com/graphite-project). See the [Graphite section](server_settings/settings.md#server_settings-graphite) in the ClickHouse server configuration file. Before configuring export of metrics, you should set up Graphite by following their official [guide](https://graphite.readthedocs.io/en/latest/install.html). Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/`. If the server is available, it responds with `200 OK`. diff --git a/docs/en/operations/table_engines/mergetree.md b/docs/en/operations/table_engines/mergetree.md index 2c2141a0a87..2a099a8947d 100644 --- a/docs/en/operations/table_engines/mergetree.md +++ b/docs/en/operations/table_engines/mergetree.md @@ -81,9 +81,9 @@ For descriptions of request parameters, see the [request description](../../quer - `merge_with_ttl_timeout` — Minimum delay in seconds before repeating a merge with TTL. Default value: 86400 (1 day). 
-**Example of setting the sections ** +**Example of setting the sections** -``` +```sql ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192 ``` @@ -125,7 +125,7 @@ The `MergeTree` engine is configured in the same way as in the example above for ## Data Storage -A table consists of data *parts* sorted by primary key. +A table consists of data parts sorted by primary key. When data is inserted in a table, separate data parts are created and each of them is lexicographically sorted by primary key. For example, if the primary key is `(CounterID, Date)`, the data in the part is sorted by `CounterID`, and within each `CounterID`, it is ordered by `Date`. diff --git a/docs/en/query_language/agg_functions/reference.md b/docs/en/query_language/agg_functions/reference.md index f9cb88c0113..350803f5aef 100644 --- a/docs/en/query_language/agg_functions/reference.md +++ b/docs/en/query_language/agg_functions/reference.md @@ -1009,7 +1009,7 @@ SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) ## stochasticLinearRegression {#agg_functions-stochasticlinearregression} -This function implements stochastic linear regression. It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size and has few methods for updating weights ([simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). +This function implements stochastic linear regression. 
It supports custom parameters for learning rate, L2 regularization coefficient, mini-batch size and has few methods for updating weights ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (used by default), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). ### Parameters {#agg_functions-stochasticlinearregression-parameters} @@ -1022,7 +1022,7 @@ stochasticLinearRegression(1.0, 1.0, 10, 'SGD') 1. `learning rate` is the coefficient on step length, when gradient descent step is performed. Too big learning rate may cause infinite weights of the model. Default is `0.00001`. 2. `l2 regularization coefficient` which may help to prevent overfitting. Default is `0.1`. 3. `mini-batch size` sets the number of elements, which gradients will be computed and summed to perform one step of gradient descent. Pure stochastic descent uses one element, however having small batches(about 10 elements) make gradient steps more stable. Default is `15`. -4. `method for updating weights`, there are 3 of them: `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require little bit more computations and memory, however they happen to be useful in terms of speed of convergance and stability of stochastic gradient methods. Default is `'SGD'`. +4. `method for updating weights`, they are: `Adam` (by default), `SGD`, `Momentum`, `Nesterov`. `Momentum` and `Nesterov` require little bit more computations and memory, however they happen to be useful in terms of speed of convergance and stability of stochastic gradient methods. 
### Usage {#agg_functions-stochasticlinearregression-usage} diff --git a/docs/en/query_language/create.md b/docs/en/query_language/create.md index bd2228efa94..81d7982eb00 100644 --- a/docs/en/query_language/create.md +++ b/docs/en/query_language/create.md @@ -109,25 +109,7 @@ Defines storage time for values. Can be specified only for MergeTree-family tabl ## Column Compression Codecs -Besides default data compression, defined in [server settings](../operations/server_settings/settings.md#compression), per-column specification is also available. - -Supported compression algorithms: - -- `NONE` — No compression. -- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression. -- `LZ4HC[(level)]` — LZ4 CH (high compression) algorithm with configurable level. Default level: 9. If you set `level <= 0`, the default level is applied. Possible levels: [1, 12]. Recommended levels are in range: [4, 9]. -- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: [1, 22]. Default value: 1. -- `Delta(delta_bytes)` — compression approach, when raw values are replaced with the difference of two neighbour values. Up to `delta_bytes` are used for storing delta value, so `delta_bytes` is a maximum size of raw values. -Possible `delta_bytes` values: 1, 2, 4, 8. Default value for `delta_bytes` is `sizeof(type)`, if it is equals to 1, 2, 4, 8. Otherwise it equals 1. -- `DoubleDelta` — Compresses values down to 1 bit (in the best case), using deltas calculation. Best compression rates are achieved on monotonic sequences with constant stride, for example, time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. Uses 1 extra bit for 32 byte deltas: 5 bit prefix instead of 4 bit prefix. 
For additional information, see the "Compressing time stamps" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document. -- `Gorilla` — Compresses values down to 1 bit (in the best case). The codec is efficient when storing series of floating point values that change slowly, because the best compression rate is achieved when neighbouring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. For additional information, see the "Compressing values" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document. - -High compression levels useful for asymmetric scenarios, like compress once, decompress a lot of times. Greater levels stands for better compression and higher CPU usage. - -!!!warning - You cannot decompress ClickHouse database files with external utilities, for example, `lz4`. Use the special utility [clickhouse-compressor](https://github.com/yandex/ClickHouse/tree/master/dbms/programs/compressor). - -Syntax example: +By default, ClickHouse applies to columns the compression method, defined in [server settings](../operations/server_settings/settings.md#compression). Also, you can define compression method for each individual column in the `CREATE TABLE` query. ``` CREATE TABLE codec_example @@ -136,28 +118,48 @@ CREATE TABLE codec_example ts DateTime CODEC(LZ4HC), float_value Float32 CODEC(NONE), double_value Float64 CODEC(LZ4HC(9)) -) -ENGINE = MergeTree -PARTITION BY tuple() -ORDER BY dt -``` - -Codecs can be combined in a pipeline. Default table codec is not included into pipeline (if it should be applied to a column, you have to specify it explicitly in pipeline). Example below shows an optimization approach for storing timeseries metrics. -Usually, values for particular metric, stored in `path` does not differ significantly from point to point. 
Using delta-encoding allows to reduce disk space usage significantly. - -``` -CREATE TABLE timeseries_example -( - dt Date, - ts DateTime, - path String, value Float32 CODEC(Delta, ZSTD) ) -ENGINE = MergeTree -PARTITION BY dt -ORDER BY (path, ts) +ENGINE = +... ``` +If a codec is specified, the default codec doesn't apply. Codecs can be combined in a pipeline, for example, `CODEC(Delta, ZSTD)`. To select the best codecs combination for you project, pass benchmarks, similar to described in the Altinity [New Encodings to Improve ClickHouse Efficiency](https://www.altinity.com/blog/2019/7/new-encodings-to-improve-clickhouse) article. + +!!!warning + You cannot decompress ClickHouse database files with external utilities, for example, `lz4`. Use the special utility, [clickhouse-compressor](https://github.com/yandex/ClickHouse/tree/master/dbms/programs/compressor). + +Compression is supported for the table engines: + +- [*MergeTree](../operations/table_engines/mergetree.md) family +- [*Log](../operations/table_engines/log_family.md) family +- [Set](../operations/table_engines/set.md) +- [Join](../operations/table_engines/join.md) + +ClickHouse supports common purpose codecs and specialized codecs. + +### Specialized codecs {#create-query-specialized-codecs} + +These codecs are designed to make compression more effective using specifities of the data. Some of this codecs don't compress data by itself, but they prepare data to be compressed better by common purpose codecs. + +Specialized codecs: + +- `Delta(delta_bytes)` — Compression approach, when raw values are replaced with the difference of two neighbor values. Up to `delta_bytes` are used for storing delta value, so `delta_bytes` is a maximum size of raw values. Possible `delta_bytes` values: 1, 2, 4, 8. Default value for `delta_bytes` is `sizeof(type)`, if it is equals to 1, 2, 4, 8. Otherwise it equals 1. +- `DoubleDelta` — Compresses values down to 1 bit (in the best case), using deltas calculation. 
Best compression rates are achieved on monotonic sequences with constant stride, for example, time series data. Can be used with any fixed-width type. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. Uses 1 extra bit for 32 byte deltas: 5 bit prefix instead of 4 bit prefix. For additional information, see the "Compressing time stamps" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document. +- `Gorilla` — Compresses values down to 1 bit (in the best case). The codec is efficient when storing series of floating point values that change slowly, because the best compression rate is achieved when neighboring values are binary equal. Implements the algorithm used in Gorilla TSDB, extending it to support 64 bit types. For additional information, see the "Compressing values" section of the [Gorilla: A Fast, Scalable, In-Memory Time Series Database](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf) document. +- `T64` — Compression approach that crops unused high bits of values in integer data types (including `Enum`, `Date` and `DateTime`). At each step of its algorithm, codec takes a block of 64 values, puts them into 64x64 bit matrix, transposes it, crops the unused bits of values and returns the rest as a sequence. Unused bits are the bits, that don't differ between maximum and minimum values in the whole data part for which the compression is used. + +### Common purpose codecs {#create-query-common-purpose-codecs} + +Codecs: + +- `NONE` — No compression. +- `LZ4` — Lossless [data compression algorithm](https://github.com/lz4/lz4) used by default. Applies LZ4 fast compression. +- `LZ4HC[(level)]` — LZ4 CH (high compression) algorithm with configurable level. Default level: 9. If you set `level <= 0`, the default level is applied. Possible levels: [1, 12]. Recommended levels are in range: [4, 9]. 
+- `ZSTD[(level)]` — [ZSTD compression algorithm](https://en.wikipedia.org/wiki/Zstandard) with configurable `level`. Possible levels: [1, 22]. Default level: 1. + +High compression levels useful for asymmetric scenarios, like compress once, decompress a lot of times. Greater levels stands for better compression and higher CPU usage. + ## Temporary Tables ClickHouse supports temporary tables which have the following characteristics: diff --git a/docs/en/query_language/functions/bitmap_functions.md b/docs/en/query_language/functions/bitmap_functions.md index 27f371841af..b0d21500035 100644 --- a/docs/en/query_language/functions/bitmap_functions.md +++ b/docs/en/query_language/functions/bitmap_functions.md @@ -56,6 +56,32 @@ SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res └─────────────┘ ``` +## bitmapSubsetInRange {#bitmap_functions-bitmapsubsetinrange} + +Return subset in specified range (not include the range_end). + +``` +bitmapSubsetInRange(bitmap, range_start, range_end) +``` + +**Parameters** + +- `bitmap` – [Bitmap object](#bitmap_functions-bitmapbuild). +- `range_start` – range start point. Type: [UInt32](../../data_types/int_uint.md). +- `range_end` – range end point(excluded). Type: [UInt32](../../data_types/int_uint.md). + +**Example** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +``` +┌─res───────────────┐ +│ [30,31,32,33,100] │ +└───────────────────┘ +``` + ## bitmapContains {#bitmap_functions-bitmapcontains} Checks whether the bitmap contains an element. diff --git a/docs/en/query_language/misc.md b/docs/en/query_language/misc.md index 31bfea5dc4d..514f5d9f823 100644 --- a/docs/en/query_language/misc.md +++ b/docs/en/query_language/misc.md @@ -195,18 +195,21 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ... 
All tables are renamed under global locking. Renaming tables is a light operation. If you indicated another database after TO, the table will be moved to this database. However, the directories with databases must reside in the same file system (otherwise, an error is returned). -## SET +## SET {#query-set} -``` sql +```sql SET param = value ``` -Allows you to set `param` to `value`. You can also make all the settings from the specified settings profile in a single query. To do this, specify 'profile' as the setting name. For more information, see the section "Settings". -The setting is made for the session, or for the server (globally) if `GLOBAL` is specified. -When making a global setting, the setting is not applied to sessions already running, including the current session. It will only be used for new sessions. +Assigns `value` to the `param` configurations settings for the current session. You cannot change [server settings](../operations/server_settings/index.md) this way. -When the server is restarted, global settings made using `SET` are lost. -To make settings that persist after a server restart, you can only use the server's config file. +You can also set all the values from the specified settings profile in a single query. + +```sql +SET profile = 'profile-name-from-the-settings-file' +``` + +For more information, see [Settings](../operations/settings/settings.md). ## SHOW CREATE TABLE diff --git a/docs/ru/query_language/agg_functions/reference.md b/docs/ru/query_language/agg_functions/reference.md index fca564b7a14..12308169f9a 100644 --- a/docs/ru/query_language/agg_functions/reference.md +++ b/docs/ru/query_language/agg_functions/reference.md @@ -878,7 +878,7 @@ SELECT arrayReduce('simpleLinearRegression', [0, 1, 2, 3], [3, 4, 5, 6]) ## stochasticLinearRegression {#agg_functions-stochasticlinearregression} -Функция реализует стохастическую линейную регрессию. 
Поддерживает пользовательские параметры для скорости обучения, коэффициента регуляризации L2, размера mini-batch и имеет несколько методов обновления весов ([simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). +Функция реализует стохастическую линейную регрессию. Поддерживает пользовательские параметры для скорости обучения, коэффициента регуляризации L2, размера mini-batch и имеет несколько методов обновления весов ([Adam](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam) (по умолчанию), [simple SGD](https://en.wikipedia.org/wiki/Stochastic_gradient_descent), [Momentum](https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Momentum), [Nesterov](https://mipt.ru/upload/medialibrary/d7e/41-91.pdf)). ### Параметры {#agg_functions-stochasticlinearregression-parameters} @@ -891,7 +891,8 @@ stochasticLinearRegression(1.0, 1.0, 10, 'SGD') 1. Скорость обучения — коэффициент длины шага, при выполнении градиентного спуска. Слишком большая скорость обучения может привести к бесконечным весам модели. По умолчанию `0.00001`. 2. Коэффициент регуляризации l2. Помогает предотвратить подгонку. По умолчанию `0.1`. 3. Размер mini-batch задаёт количество элементов, чьи градиенты будут вычислены и просуммированы при выполнении одного шага градиентного спуска. Чистый стохастический спуск использует один элемент, однако использование mini-batch (около 10 элементов) делает градиентные шаги более стабильными. По умолчанию `15`. -4. Метод обновления весов, можно выбрать один из следующих: `SGD`, `Momentum`, `Nesterov`. `Momentum` и `Nesterov` более требовательные к вычислительным ресурсам и памяти, однако они имеют высокую скорость схождения и остальные методы стохастического градиента. По умолчанию `SGD`. +4. 
Метод обновления весов, можно выбрать один из следующих: `Adam` (по умолчанию), `SGD`, `Momentum`, `Nesterov`. `Momentum` и `Nesterov` более требовательные к вычислительным ресурсам и памяти, однако они имеют высокую скорость схождения и устойчивости методов стохастического градиента. + ### Использование {#agg_functions-stochasticlinearregression-usage} @@ -1005,4 +1006,3 @@ stochasticLogisticRegression(1.0, 1.0, 10, 'SGD') - [Отличие линейной от логистической регрессии](https://moredez.ru/q/51225972/) [Оригинальная статья](https://clickhouse.yandex/docs/ru/query_language/agg_functions/reference/) - diff --git a/docs/zh/data_types/domain/ipv4.md b/docs/zh/data_types/domain/ipv4.md new file mode 100644 index 00000000000..4adf13409fe --- /dev/null +++ b/docs/zh/data_types/domain/ipv4.md @@ -0,0 +1,78 @@ +## IPv4 + +`IPv4`是与`UInt32`类型保持二进制兼容的Domain类型,其用于存储IPv4地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 + +### 基本使用 + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv4 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +同时您也可以使用`IPv4`类型的列作为主键: + +``` sql +CREATE TABLE hits (url String, from IPv4) ENGINE = MergeTree() ORDER BY from; +``` + +在写入与查询时,`IPv4`类型能够识别可读性更加友好的输入输出格式: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '116.253.40.133')('https://clickhouse.yandex', '183.247.232.58')('https://clickhouse.yandex/docs/en/', '116.106.34.242'); + +SELECT * FROM hits; +``` + +``` +┌─url────────────────────────────────┬───────────from─┐ +│ https://clickhouse.yandex/docs/en/ │ 116.106.34.242 │ +│ https://wikipedia.org │ 116.253.40.133 │ +│ https://clickhouse.yandex │ 183.247.232.58 │ +└────────────────────────────────────┴────────────────┘ +``` + +同时它提供更为紧凑的二进制存储格式: + +``` sql +SELECT toTypeName(from), 
hex(from) FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(from)─┬─hex(from)─┐ +│ IPv4 │ B7F7E83A │ +└──────────────────┴───────────┘ +``` + +不可隐式转换为除`UInt32`以外的其他类型类型。如果要将`IPv4`类型的值转换成字符串,你可以使用`IPv4NumToString()`显示的进行转换: + +``` sql +SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ +│ String │ 183.247.232.58 │ +└───────────────────────────────────┴────────────────┘ +``` + +或可以使用`CAST`将它转换为`UInt32`类型: + +``` sql +SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ +│ UInt32 │ 3086477370 │ +└──────────────────────────────────┴────────────┘ +``` + +[来源文章](https://clickhouse.yandex/docs/en/data_types/domains/ipv4) diff --git a/docs/zh/data_types/domain/ipv6.md b/docs/zh/data_types/domain/ipv6.md new file mode 100644 index 00000000000..1209350990f --- /dev/null +++ b/docs/zh/data_types/domain/ipv6.md @@ -0,0 +1,78 @@ +## IPv6 + +`IPv6`是与`FixedString(16)`类型保持二进制兼容的Domain类型,其用于存储IPv6地址的值。它提供了更为紧凑的二进制存储的同时支持识别可读性更加友好的输入输出格式。 + +### 基本用法 + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY url; + +DESCRIBE TABLE hits; +``` + +``` +┌─name─┬─type───┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┐ +│ url │ String │ │ │ │ │ +│ from │ IPv6 │ │ │ │ │ +└──────┴────────┴──────────────┴────────────────────┴─────────┴──────────────────┘ +``` + +同时您也可以使用`IPv6`类型的列作为主键: + +``` sql +CREATE TABLE hits (url String, from IPv6) ENGINE = MergeTree() ORDER BY from; +``` + +在写入与查询时,`IPv6`类型能够识别可读性更加友好的输入输出格式: + +``` sql +INSERT INTO hits (url, from) VALUES ('https://wikipedia.org', '2a02:aa08:e000:3100::2')('https://clickhouse.yandex', '2001:44c8:129:2632:33:0:252:2')('https://clickhouse.yandex/docs/en/', '2a02:e980:1e::1'); + +SELECT * FROM hits; +``` + +``` +┌─url────────────────────────────────┬─from──────────────────────────┐ +│ https://clickhouse.yandex │ 
2001:44c8:129:2632:33:0:252:2 │ +│ https://clickhouse.yandex/docs/en/ │ 2a02:e980:1e::1 │ +│ https://wikipedia.org │ 2a02:aa08:e000:3100::2 │ +└────────────────────────────────────┴───────────────────────────────┘ +``` + +同时它提供更为紧凑的二进制存储格式: + +``` sql +SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(from)─┬─hex(from)────────────────────────┐ +│ IPv6 │ 200144C8012926320033000002520002 │ +└──────────────────┴──────────────────────────────────┘ +``` + +不可隐式转换为除`FixedString(16)`以外的其他类型类型。如果要将`IPv6`类型的值转换成字符串,你可以使用`IPv6NumToString()`显示的进行转换: + +``` sql +SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ +│ String │ 2001:44c8:129:2632:33:0:252:2 │ +└───────────────────────────────────┴───────────────────────────────┘ +``` + +或使用`CAST`将其转换为`FixedString(16)`: + +``` sql +SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; +``` + +``` +┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ +│ FixedString(16) │ ��� │ +└───────────────────────────────────────────┴─────────┘ +``` + +[来源文章](https://clickhouse.yandex/docs/en/data_types/domains/ipv6) diff --git a/docs/zh/data_types/domain/overview.md b/docs/zh/data_types/domain/overview.md new file mode 100644 index 00000000000..b4db116e75b --- /dev/null +++ b/docs/zh/data_types/domain/overview.md @@ -0,0 +1,26 @@ +# Domains + +Domain类型是特定实现的类型,它总是与某个现存的基础类型保持二进制兼容的同时添加一些额外的特性,以能够在维持磁盘数据不变的情况下使用这些额外的特性。目前ClickHouse暂不支持自定义domain类型。 + +如果你可以在一个地方使用与Domain类型二进制兼容的基础类型,那么在相同的地方您也可以使用Domain类型,例如: + +* 使用Domain类型作为表中列的类型 +* 对Domain类型的列进行读/写数据 +* 如果与Domain二进制兼容的基础类型可以作为索引,那么Domain类型也可以作为索引 +* 将Domain类型作为参数传递给函数使用 +* 其他 + +### Domains的额外特性 + +* 在执行SHOW CREATE TABLE 或 DESCRIBE TABLE时,其对应的列总是展示为Domain类型的名称 +* 在INSERT INTO domain_table(domain_column) VALUES(...)中输入数据总是以更人性化的格式进行输入 +* 在SELECT domain_column FROM domain_table中数据总是以更人性化的格式输出 +* 在INSERT INTO domain_table FORMAT CSV 
...中,实现外部源数据以更人性化的格式载入 + +### Domains类型的限制 + +* 无法通过`ALTER TABLE`将基础类型的索引转换为Domain类型的索引。 +* 当从其他列或表插入数据时,无法将string类型的值隐式地转换为Domain类型的值。 +* 无法对存储为Domain类型的值添加约束。 + +[来源文章](https://clickhouse.yandex/docs/en/data_types/domains/overview) diff --git a/docs/zh/query_language/functions/bitmap_functions.md b/docs/zh/query_language/functions/bitmap_functions.md index ff05aecf9b3..97be4f38853 100644 --- a/docs/zh/query_language/functions/bitmap_functions.md +++ b/docs/zh/query_language/functions/bitmap_functions.md @@ -51,6 +51,56 @@ SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])) AS res └─────────────┘ ``` +## bitmapSubsetInRange + +将位图指定范围(不包含range_end)转换为另一个位图。 + +``` +bitmapSubsetInRange(bitmap, range_start, range_end) +``` + +**参数** + +- `bitmap` – 位图对象. +- `range_start` – 范围起始点(含). +- `range_end` – 范围结束点(不含). + +**示例** + +``` sql +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,100,200,500]), toUInt32(30), toUInt32(200))) AS res +``` + +``` +┌─res───────────────┐ +│ [30,31,32,33,100] │ +└───────────────────┘ +``` + +## bitmapContains + +检查位图是否包含指定元素。 + +``` +bitmapContains(haystack, needle) +``` + +**参数** + +- `haystack` – 位图对象. +- `needle` – 元素,类型UInt32. 
+ +**示例** + +``` sql +SELECT bitmapContains(bitmapBuild([1,5,7,9]), toUInt32(9)) AS res +``` +```text +┌─res─┐ +│ 1 │ +└─────┘ +``` + ## bitmapHasAny 与`hasAny(array,array)`类似,如果位图有任何公共元素则返回1,否则返回0。 diff --git a/libs/libcommon/CMakeLists.txt b/libs/libcommon/CMakeLists.txt index ce8c5801613..885a6f0ec0b 100644 --- a/libs/libcommon/CMakeLists.txt +++ b/libs/libcommon/CMakeLists.txt @@ -123,6 +123,7 @@ target_link_libraries (common PUBLIC ${Boost_SYSTEM_LIBRARY} PRIVATE + ${CMAKE_DL_LIBS} ${MALLOC_LIBRARIES} Threads::Threads ${MEMCPY_LIBRARIES}) diff --git a/libs/libcommon/include/ext/enumerate.h b/libs/libcommon/include/ext/enumerate.h index bb6f63ee148..9a55d853e35 100644 --- a/libs/libcommon/include/ext/enumerate.h +++ b/libs/libcommon/include/ext/enumerate.h @@ -26,7 +26,7 @@ namespace ext std::size_t idx; It it; - enumerate_iterator(const std::size_t idx, It it) : idx{idx}, it{it} {} + enumerate_iterator(const std::size_t idx_, It it_) : idx{idx_}, it{it_} {} auto operator*() const { return reference(idx, *it); } @@ -42,7 +42,7 @@ namespace ext Collection & collection; - enumerate_wrapper(Collection & collection) : collection(collection) {} + enumerate_wrapper(Collection & collection_) : collection(collection_) {} auto begin() { return iterator(0, std::begin(collection)); } auto end() { return iterator(ext::size(collection), std::end(collection)); } diff --git a/libs/libcommon/include/ext/scope_guard.h b/libs/libcommon/include/ext/scope_guard.h index 4162d80b77d..c2c7e5ec630 100644 --- a/libs/libcommon/include/ext/scope_guard.h +++ b/libs/libcommon/include/ext/scope_guard.h @@ -9,13 +9,13 @@ template class scope_guard { const F function; public: - constexpr scope_guard(const F & function) : function{function} {} - constexpr scope_guard(F && function) : function{std::move(function)} {} + constexpr scope_guard(const F & function_) : function{function_} {} + constexpr scope_guard(F && function_) : function{std::move(function_)} {} ~scope_guard() { function(); } }; 
template -inline scope_guard make_scope_guard(F && function) { return std::forward(function); } +inline scope_guard make_scope_guard(F && function_) { return std::forward(function_); } } diff --git a/libs/libcommon/src/DateLUT.cpp b/libs/libcommon/src/DateLUT.cpp index ce3e7e32a26..183d003ffe8 100644 --- a/libs/libcommon/src/DateLUT.cpp +++ b/libs/libcommon/src/DateLUT.cpp @@ -1,6 +1,6 @@ #include -#include +#include #include #include #include @@ -26,7 +26,7 @@ Poco::DigestEngine::Digest calcSHA1(const std::string & path) std::string determineDefaultTimeZone() { - namespace fs = boost::filesystem; + namespace fs = std::filesystem; const char * tzdir_env_var = std::getenv("TZDIR"); fs::path tz_database_path = tzdir_env_var ? tzdir_env_var : "/usr/share/zoneinfo/"; @@ -87,7 +87,10 @@ std::string determineDefaultTimeZone() /// Try the same with full symlinks resolution { - tz_file_path = fs::canonical(tz_file_path, tz_database_path); + if (!tz_file_path.is_absolute()) + tz_file_path = tz_database_path / tz_file_path; + + tz_file_path = fs::canonical(tz_file_path); fs::path relative_path = tz_file_path.lexically_relative(tz_database_path); if (!relative_path.empty() && *relative_path.begin() != ".." && *relative_path.begin() != ".") @@ -109,11 +112,11 @@ std::string determineDefaultTimeZone() { /// Some timezone databases contain copies of toplevel tzdata files in the posix/ directory /// and tzdata files with leap seconds in the right/ directory. Skip them. 
- candidate_it.no_push(); + candidate_it.disable_recursion_pending(); continue; } - if (candidate_it->status().type() != fs::regular_file || path.filename() == "localtime") + if (!fs::is_regular_file(*candidate_it) || path.filename() == "localtime") continue; if (fs::file_size(path) == tzfile_size && calcSHA1(path.string()) == tzfile_sha1) diff --git a/libs/libloggers/loggers/ExtendedLogChannel.h b/libs/libloggers/loggers/ExtendedLogChannel.h index e70cd7b3094..5388c6aa467 100644 --- a/libs/libloggers/loggers/ExtendedLogChannel.h +++ b/libs/libloggers/loggers/ExtendedLogChannel.h @@ -13,7 +13,7 @@ namespace DB class ExtendedLogMessage { public: - explicit ExtendedLogMessage(const Poco::Message & base) : base(base) {} + explicit ExtendedLogMessage(const Poco::Message & base_) : base(base_) {} /// Attach additional data to the message static ExtendedLogMessage getFrom(const Poco::Message & base);