Implemented some suggestions from clang-tidy (part 3) [#CLICKHOUSE-3301].

Alexey Milovidov 2017-09-08 06:47:27 +03:00
parent f1c98dac3c
commit d29c77adea
53 changed files with 211 additions and 186 deletions

View File

@@ -41,7 +41,7 @@ struct CollectAliases
     ASTPtr node;
     Kind kind;
 
-    AliasInfo(ASTPtr node, Kind kind) : node(node), kind(kind) {}
+    AliasInfo(const ASTPtr & node, Kind kind) : node(node), kind(kind) {}
 };
 
 using Aliases = std::unordered_map<String, AliasInfo>;
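Note on the recurring pattern above: most changes in this commit apply clang-tidy's performance-unnecessary-value-param advice. ASTPtr is a std::shared_ptr, so taking it by value bumps the atomic reference count on every call even when the callee only stores one copy; taking it by const reference defers the single copy to the member initializer. A minimal standalone sketch (NodePtr is a stand-in, not the ClickHouse type):

    #include <memory>
    #include <string>

    using NodePtr = std::shared_ptr<std::string>;  // stand-in for ASTPtr

    struct ByValue
    {
        NodePtr node;
        // By value: one atomic refcount increment at the call site,
        // plus another copy into the member.
        explicit ByValue(NodePtr node_) : node(node_) {}
    };

    struct ByConstRef
    {
        NodePtr node;
        // By const reference: no refcount traffic at the call site;
        // exactly one copy, in the initializer list.
        explicit ByConstRef(const NodePtr & node_) : node(node_) {}
    };

The complementary sink idiom, pass-by-value plus std::move, appears once near the end of this commit (Context::setZooKeeper); const reference is the right default when the parameter is only read or copied once.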

View File

@@ -17,7 +17,7 @@ using ConstSetPtr = std::shared_ptr<const Set>;
 class ColumnSet final : public IColumnDummy
 {
 public:
-    ColumnSet(size_t s_, ConstSetPtr data_) : IColumnDummy(s_), data(data_) {}
+    ColumnSet(size_t s_, const ConstSetPtr & data_) : IColumnDummy(s_), data(data_) {}
 
     /// The column is not a constant. Otherwise, the column will be used in calculations in ExpressionActions::prepare, when a set from subquery is not ready yet.
     bool isConst() const override { return false; }

View File

@@ -18,7 +18,7 @@ class AddingDefaultBlockOutputStream : public IBlockOutputStream
 {
 public:
     AddingDefaultBlockOutputStream(
-        BlockOutputStreamPtr output_,
+        const BlockOutputStreamPtr & output_,
         NamesAndTypesListPtr required_columns_,
         const ColumnDefaults & column_defaults_,
         const Context & context_,

View File

@@ -18,7 +18,7 @@ namespace ErrorCodes
 BlockInputStreamFromRowInputStream::BlockInputStreamFromRowInputStream(
-    RowInputStreamPtr row_input_,
+    const RowInputStreamPtr & row_input_,
     const Block & sample_,
     size_t max_block_size_,
     UInt64 allow_errors_num_,

View File

@@ -18,7 +18,7 @@ class BlockInputStreamFromRowInputStream : public IProfilingBlockInputStream
 public:
     /** sample_ - block with zero rows, that structure describes how to interpret values */
     BlockInputStreamFromRowInputStream(
-        RowInputStreamPtr row_input_,
+        const RowInputStreamPtr & row_input_,
         const Block & sample_,
         size_t max_block_size_,
         UInt64 allow_errors_num_,

View File

@@ -206,8 +206,8 @@ bool CSVRowInputStream::parseRowAndPrintDiagnosticInfo(Block & block,
             << "name: " << sample.safeGetByPosition(i).name << ", " << std::string(max_length_of_column_name - sample.safeGetByPosition(i).name.size(), ' ')
             << "type: " << data_types[i]->getName() << ", " << std::string(max_length_of_data_type_name - data_types[i]->getName().size(), ' ');
 
-        auto prev_position = istr.position();
-        auto curr_position = istr.position();
+        BufferBase::Position prev_position = istr.position();
+        BufferBase::Position curr_position = istr.position();
         std::exception_ptr exception;
 
         try
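Note: replacing auto with the explicit BufferBase::Position spells out that these are raw char pointers into the read buffer, which auto would hide. A compact sketch under that assumption (BufferLike is illustrative, not the ClickHouse class):

    struct BufferLike
    {
        using Position = char *;   // as in ClickHouse's BufferBase
        Position pos = nullptr;
        Position position() const { return pos; }
    };

    void snapshot(BufferLike & istr)
    {
        // Same codegen as `auto`, but the pointer nature of the cursors,
        // and thus their invalidation on buffer refill, is now explicit.
        BufferLike::Position prev_position = istr.position();
        BufferLike::Position curr_position = istr.position();
        (void)prev_position; (void)curr_position;
    }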

View File

@@ -15,11 +15,13 @@ namespace DB
 class CollapsingFinalBlockInputStream : public IProfilingBlockInputStream
 {
 public:
-    CollapsingFinalBlockInputStream(BlockInputStreams inputs_, const SortDescription & description_,
-        const String & sign_column_name_)
+    CollapsingFinalBlockInputStream(
+        const BlockInputStreams & inputs,
+        const SortDescription & description_,
+        const String & sign_column_name_)
         : description(description_), sign_column_name(sign_column_name_)
     {
-        children.insert(children.end(), inputs_.begin(), inputs_.end());
+        children.insert(children.end(), inputs.begin(), inputs.end());
     }
 
     ~CollapsingFinalBlockInputStream();
@@ -55,10 +57,10 @@ private:
     struct MergingBlock : boost::noncopyable
     {
-        MergingBlock(Block block_,
+        MergingBlock(const Block & block_,
             size_t stream_index_,
             const SortDescription & desc,
-            String sign_column_name,
+            const String & sign_column_name,
             BlockPlainPtrs * output_blocks)
             : block(block_), stream_index(stream_index_), output_blocks(output_blocks)
         {

View File

@@ -16,7 +16,7 @@ public:
     CountingBlockOutputStream(const BlockOutputStreamPtr & stream_)
         : stream(stream_) {}
 
-    void setProgressCallback(ProgressCallback callback)
+    void setProgressCallback(const ProgressCallback & callback)
     {
         progress_callback = callback;
     }
@@ -35,9 +35,9 @@ public:
     void writePrefix() override { stream->writePrefix(); }
     void writeSuffix() override { stream->writeSuffix(); }
-    void flush() override { stream->flush(); }
+    void flush() override { stream->flush(); }
     void onProgress(const Progress & progress) override { stream->onProgress(progress); }
-    String getContentType() const override { return stream->getContentType(); }
+    String getContentType() const override { return stream->getContentType(); }
 
 protected:
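Note: ProgressCallback is a std::function in ClickHouse, and copying a std::function by value can heap-allocate when the callable doesn't fit the small-object buffer; taking it by const reference leaves the single copy to the member assignment. A standalone sketch (the callback signature is illustrative):

    #include <cstddef>
    #include <functional>

    struct Progress { size_t rows = 0; };
    using ProgressCallback = std::function<void(const Progress &)>;

    class StreamLike
    {
    public:
        // const & avoids a potentially allocating copy at the call site;
        // the one necessary copy happens in the assignment below.
        void setProgressCallback(const ProgressCallback & callback)
        {
            progress_callback = callback;
        }
    private:
        ProgressCallback progress_callback;
    };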

View File

@@ -117,18 +117,21 @@ void CreatingSetsBlockInputStream::createOne(SubqueryForSet & subquery)
         if ((max_rows_to_transfer && rows_to_transfer > max_rows_to_transfer)
             || (max_bytes_to_transfer && bytes_to_transfer > max_bytes_to_transfer))
         {
-            if (transfer_overflow_mode == OverflowMode::THROW)
-                throw Exception("IN/JOIN external table size limit exceeded."
-                    " Rows: " + toString(rows_to_transfer)
-                    + ", limit: " + toString(max_rows_to_transfer)
-                    + ". Bytes: " + toString(bytes_to_transfer)
-                    + ", limit: " + toString(max_bytes_to_transfer) + ".",
-                    ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
-            if (transfer_overflow_mode == OverflowMode::BREAK)
-                done_with_table = true;
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            switch (transfer_overflow_mode)
+            {
+                case OverflowMode::THROW:
+                    throw Exception("IN/JOIN external table size limit exceeded."
+                        " Rows: " + toString(rows_to_transfer)
+                        + ", limit: " + toString(max_rows_to_transfer)
+                        + ". Bytes: " + toString(bytes_to_transfer)
+                        + ", limit: " + toString(max_bytes_to_transfer) + ".",
+                        ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
+                case OverflowMode::BREAK:
+                    done_with_table = true;
+                    break;
+                default:
+                    throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            }
         }
     }
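Note on this refactoring, repeated below for DISTINCT, Join, Set, and the aggregator: dispatching on OverflowMode with a switch instead of an if-chain gives every mode exactly one labeled branch (in the extracted old code above, the BREAK branch appears to fall through into the unconditional "Logical error" throw), and where the default case is dropped, as in the Aggregator hunk further down, -Wswitch-style warnings can flag any enumerator added later but left unhandled. A minimal sketch of the shape, with standalone names rather than the ClickHouse API:

    #include <stdexcept>

    enum class OverflowMode { THROW, BREAK, ANY };

    // Returns false when the caller should stop filling the table gracefully.
    bool onLimitExceeded(OverflowMode mode, bool & done_with_table)
    {
        switch (mode)
        {
            case OverflowMode::THROW:
                throw std::runtime_error("size limit exceeded");
            case OverflowMode::BREAK:
                done_with_table = true;   // stop filling, but do not fail
                return false;
            case OverflowMode::ANY:
                return true;              // e.g. keep aggregating existing keys only
        }
        // Unreachable while the switch is exhaustive; kept as a safety net.
        throw std::logic_error("unknown overflow mode");
    }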

View File

@@ -18,7 +18,7 @@ class CreatingSetsBlockInputStream : public IProfilingBlockInputStream
 {
 public:
     CreatingSetsBlockInputStream(
-        BlockInputStreamPtr input,
+        const BlockInputStreamPtr & input,
         const SubqueriesForSets & subqueries_for_sets_,
         const Limits & limits)
         : subqueries_for_sets(subqueries_for_sets_),

View File

@@ -8,8 +8,8 @@ namespace ErrorCodes
     extern const int SET_SIZE_LIMIT_EXCEEDED;
 }
 
-DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const Limits & limits, size_t limit_hint_, Names columns_)
-    : columns_names(columns_)
+DistinctBlockInputStream::DistinctBlockInputStream(const BlockInputStreamPtr & input, const Limits & limits, size_t limit_hint_, const Names & columns)
+    : columns_names(columns)
     , limit_hint(limit_hint_)
     , max_rows(limits.max_rows_in_distinct)
     , max_bytes(limits.max_bytes_in_distinct)
@@ -68,18 +68,22 @@ Block DistinctBlockInputStream::readImpl()
         if (!checkLimits())
         {
-            if (overflow_mode == OverflowMode::THROW)
-                throw Exception("DISTINCT-Set size limit exceeded."
-                    " Rows: " + toString(data.getTotalRowCount()) +
-                    ", limit: " + toString(max_rows) +
-                    ". Bytes: " + toString(data.getTotalByteCount()) +
-                    ", limit: " + toString(max_bytes) + ".",
-                    ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
-            if (overflow_mode == OverflowMode::BREAK)
-                return Block();
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            switch (overflow_mode)
+            {
+                case OverflowMode::THROW:
+                    throw Exception("DISTINCT-Set size limit exceeded."
+                        " Rows: " + toString(data.getTotalRowCount()) +
+                        ", limit: " + toString(max_rows) +
+                        ". Bytes: " + toString(data.getTotalByteCount()) +
+                        ", limit: " + toString(max_bytes) + ".",
+                        ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
+                case OverflowMode::BREAK:
+                    return Block();
+                default:
+                    throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            }
         }
 
         size_t all_columns = block.columns();

View File

@@ -18,7 +18,7 @@ class DistinctBlockInputStream : public IProfilingBlockInputStream
 {
 public:
     /// Empty columns_ means all columns.
-    DistinctBlockInputStream(const BlockInputStreamPtr & input, const Limits & limits, size_t limit_hint_, Names columns_);
+    DistinctBlockInputStream(const BlockInputStreamPtr & input, const Limits & limits, size_t limit_hint_, const Names & columns);
 
     String getName() const override { return "Distinct"; }

View File

@@ -71,18 +71,22 @@ Block DistinctSortedBlockInputStream::readImpl()
         if (!checkLimits())
         {
-            if (overflow_mode == OverflowMode::THROW)
-                throw Exception("DISTINCT-Set size limit exceeded."
-                    " Rows: " + toString(data.getTotalRowCount()) +
-                    ", limit: " + toString(max_rows) +
-                    ". Bytes: " + toString(data.getTotalByteCount()) +
-                    ", limit: " + toString(max_bytes) + ".",
-                    ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
-            if (overflow_mode == OverflowMode::BREAK)
-                return Block();
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            switch (overflow_mode)
+            {
+                case OverflowMode::THROW:
+                    throw Exception("DISTINCT-Set size limit exceeded."
+                        " Rows: " + toString(data.getTotalRowCount()) +
+                        ", limit: " + toString(max_rows) +
+                        ". Bytes: " + toString(data.getTotalByteCount()) +
+                        ", limit: " + toString(max_bytes) + ".",
+                        ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
+                case OverflowMode::BREAK:
+                    return Block();
+                default:
+                    throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+            }
         }
 
         prev_block.block = block;

View File

@@ -5,7 +5,7 @@
 namespace DB
 {
 
-ExpressionBlockInputStream::ExpressionBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_)
+ExpressionBlockInputStream::ExpressionBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_)
     : expression(expression_)
 {
     children.push_back(input);

View File

@@ -19,7 +19,7 @@ private:
     using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
 
 public:
-    ExpressionBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_);
+    ExpressionBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_);
 
     String getName() const override;
     String getID() const override;

View File

@@ -17,13 +17,13 @@ namespace ErrorCodes
 }
 
-FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_, ssize_t filter_column_)
+FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, ssize_t filter_column_)
     : expression(expression_), filter_column(filter_column_)
 {
     children.push_back(input);
 }
 
-FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_, const String & filter_column_name_)
+FilterBlockInputStream::FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, const String & filter_column_name_)
     : expression(expression_), filter_column(-1), filter_column_name(filter_column_name_)
 {
     children.push_back(input);

View File

@@ -20,8 +20,8 @@ private:
 public:
     /// filter_column_ - the number of the column with filter conditions.
-    FilterBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_, ssize_t filter_column_);
-    FilterBlockInputStream(const BlockInputStreamPtr & input, ExpressionActionsPtr expression_, const String & filter_column_name_);
+    FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, ssize_t filter_column_);
+    FilterBlockInputStream(const BlockInputStreamPtr & input, const ExpressionActionsPtr & expression_, const String & filter_column_name_);
 
     String getName() const override;
     String getID() const override;

View File

@@ -13,10 +13,10 @@ class FilterColumnsBlockInputStream : public IProfilingBlockInputStream
 {
 public:
     FilterColumnsBlockInputStream(
-        BlockInputStreamPtr input_, const Names & columns_to_save_, bool throw_if_column_not_found_)
+        const BlockInputStreamPtr & input, const Names & columns_to_save_, bool throw_if_column_not_found_)
         : columns_to_save(columns_to_save_), throw_if_column_not_found(throw_if_column_not_found_)
     {
-        children.push_back(input_);
+        children.push_back(input);
     }
 
     String getName() const override

View File

@@ -99,7 +99,7 @@ void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t mult
 BlockInputStreams IBlockInputStream::getLeaves()
 {
     BlockInputStreams res;
-    getLeavesImpl(res);
+    getLeavesImpl(res, nullptr);
     return res;
 }
@@ -122,7 +122,7 @@ void IBlockInputStream::getLeafRowsBytes(size_t & rows, size_t & bytes)
 }
 
-void IBlockInputStream::getLeavesImpl(BlockInputStreams & res, BlockInputStreamPtr this_shared_ptr)
+void IBlockInputStream::getLeavesImpl(BlockInputStreams & res, const BlockInputStreamPtr & this_shared_ptr)
 {
     if (children.empty())
     {

View File

@@ -113,7 +113,7 @@ protected:
     BlockInputStreams children;
 
 private:
-    void getLeavesImpl(BlockInputStreams & res, BlockInputStreamPtr this_shared_ptr = nullptr);
+    void getLeavesImpl(BlockInputStreams & res, const BlockInputStreamPtr & this_shared_ptr);
 
     size_t checkDepthImpl(size_t max_depth, size_t level) const;

View File

@@ -142,53 +142,45 @@ void IProfilingBlockInputStream::updateExtremes(Block & block)
 bool IProfilingBlockInputStream::checkLimits()
 {
+    auto handle_overflow_mode = [this] (OverflowMode mode, const String & message, int code)
+    {
+        switch (mode)
+        {
+            case OverflowMode::THROW:
+                throw Exception(message, code);
+            case OverflowMode::BREAK:
+                return false;
+            default:
+                throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        }
+    };
+
     if (limits.mode == LIMITS_CURRENT)
     {
         /// Check current stream limitations (i.e. max_result_{rows,bytes})
 
         if (limits.max_rows_to_read && info.rows > limits.max_rows_to_read)
-        {
-            if (limits.read_overflow_mode == OverflowMode::THROW)
-                throw Exception(std::string("Limit for result rows")
+            return handle_overflow_mode(limits.read_overflow_mode,
+                std::string("Limit for result rows")
                     + " exceeded: read " + toString(info.rows)
                     + " rows, maximum: " + toString(limits.max_rows_to_read),
-                    ErrorCodes::TOO_MUCH_ROWS);
-            if (limits.read_overflow_mode == OverflowMode::BREAK)
-                return false;
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
-        }
+                ErrorCodes::TOO_MUCH_ROWS);
 
         if (limits.max_bytes_to_read && info.bytes > limits.max_bytes_to_read)
-        {
-            if (limits.read_overflow_mode == OverflowMode::THROW)
-                throw Exception(std::string("Limit for result bytes (uncompressed)")
+            return handle_overflow_mode(limits.read_overflow_mode,
+                std::string("Limit for result bytes (uncompressed)")
                     + " exceeded: read " + toString(info.bytes)
                     + " bytes, maximum: " + toString(limits.max_bytes_to_read),
-                    ErrorCodes::TOO_MUCH_BYTES);
-            if (limits.read_overflow_mode == OverflowMode::BREAK)
-                return false;
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
-        }
+                ErrorCodes::TOO_MUCH_BYTES);
     }
 
     if (limits.max_execution_time != 0
         && info.total_stopwatch.elapsed() > static_cast<UInt64>(limits.max_execution_time.totalMicroseconds()) * 1000)
-    {
-        if (limits.timeout_overflow_mode == OverflowMode::THROW)
-            throw Exception("Timeout exceeded: elapsed " + toString(info.total_stopwatch.elapsedSeconds())
+        return handle_overflow_mode(limits.timeout_overflow_mode,
+            "Timeout exceeded: elapsed " + toString(info.total_stopwatch.elapsedSeconds())
             + " seconds, maximum: " + toString(limits.max_execution_time.totalMicroseconds() / 1000000.0),
             ErrorCodes::TIMEOUT_EXCEEDED);
-        if (limits.timeout_overflow_mode == OverflowMode::BREAK)
-            return false;
-        throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
-    }
 
     return true;
 }
@@ -244,28 +236,36 @@ void IProfilingBlockInputStream::progressImpl(const Progress & value)
             && ((limits.max_rows_to_read && total_rows_estimate > limits.max_rows_to_read)
                 || (limits.max_bytes_to_read && bytes_processed > limits.max_bytes_to_read)))
         {
-            if (limits.read_overflow_mode == OverflowMode::THROW)
+            switch (limits.read_overflow_mode)
             {
-                if (limits.max_rows_to_read && total_rows_estimate > limits.max_rows_to_read)
-                    throw Exception("Limit for rows to read exceeded: " + toString(total_rows_estimate)
-                        + " rows read (or to read), maximum: " + toString(limits.max_rows_to_read),
-                        ErrorCodes::TOO_MUCH_ROWS);
-                else
-                    throw Exception("Limit for (uncompressed) bytes to read exceeded: " + toString(bytes_processed)
-                        + " bytes read, maximum: " + toString(limits.max_bytes_to_read),
-                        ErrorCodes::TOO_MUCH_BYTES);
-            }
-            else if (limits.read_overflow_mode == OverflowMode::BREAK)
-            {
-                /// For `break`, we will stop only if so many lines were actually read, and not just supposed to be read.
-                if ((limits.max_rows_to_read && rows_processed > limits.max_rows_to_read)
-                    || (limits.max_bytes_to_read && bytes_processed > limits.max_bytes_to_read))
+                case OverflowMode::THROW:
                 {
-                    cancel();
+                    if (limits.max_rows_to_read && total_rows_estimate > limits.max_rows_to_read)
+                        throw Exception("Limit for rows to read exceeded: " + toString(total_rows_estimate)
+                            + " rows read (or to read), maximum: " + toString(limits.max_rows_to_read),
+                            ErrorCodes::TOO_MUCH_ROWS);
+                    else
+                        throw Exception("Limit for (uncompressed) bytes to read exceeded: " + toString(bytes_processed)
+                            + " bytes read, maximum: " + toString(limits.max_bytes_to_read),
+                            ErrorCodes::TOO_MUCH_BYTES);
+                    break;
                 }
+                case OverflowMode::BREAK:
+                {
+                    /// For `break`, we will stop only if so many lines were actually read, and not just supposed to be read.
+                    if ((limits.max_rows_to_read && rows_processed > limits.max_rows_to_read)
+                        || (limits.max_bytes_to_read && bytes_processed > limits.max_bytes_to_read))
+                    {
+                        cancel();
+                    }
+                    break;
+                }
+                default:
+                    throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
             }
-            else
-                throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
         }
 
         size_t total_rows = process_list_elem->progress_in.total_rows;
@@ -317,7 +317,7 @@ void IProfilingBlockInputStream::cancel()
 }
 
-void IProfilingBlockInputStream::setProgressCallback(ProgressCallback callback)
+void IProfilingBlockInputStream::setProgressCallback(const ProgressCallback & callback)
 {
     progress_callback = callback;
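Note: checkLimits above previously repeated the THROW/BREAK/logical-error triage once per limit; hoisting it into the handle_overflow_mode lambda leaves each check as a single return statement. A condensed standalone sketch of that shape (names and limit fields are illustrative, not the ClickHouse API):

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    enum class OverflowMode { THROW, BREAK };

    bool checkLimits(size_t rows, size_t max_rows, size_t bytes, size_t max_bytes, OverflowMode mode)
    {
        // One place decides the policy; every limit check reuses it.
        auto handle_overflow_mode = [](OverflowMode m, const std::string & message) -> bool
        {
            switch (m)
            {
                case OverflowMode::THROW:
                    throw std::runtime_error(message);
                case OverflowMode::BREAK:
                    return false;   // caller stops reading gracefully
            }
            throw std::logic_error("unknown overflow mode");
        };

        if (max_rows && rows > max_rows)
            return handle_overflow_mode(mode, "row limit exceeded: " + std::to_string(rows));
        if (max_bytes && bytes > max_bytes)
            return handle_overflow_mode(mode, "byte limit exceeded: " + std::to_string(bytes));
        return true;
    }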

View File

@@ -69,7 +69,7 @@ public:
       * The function takes the number of rows in the last block, the number of bytes in the last block.
       * Note that the callback can be called from different threads.
       */
-    void setProgressCallback(ProgressCallback callback);
+    void setProgressCallback(const ProgressCallback & callback);
 
     /** In this method:

View File

@@ -3,8 +3,8 @@
 namespace DB
 {
 
-LimitByBlockInputStream::LimitByBlockInputStream(const BlockInputStreamPtr & input, size_t group_size_, Names columns_)
-    : columns_names(columns_)
+LimitByBlockInputStream::LimitByBlockInputStream(const BlockInputStreamPtr & input, size_t group_size_, const Names & columns)
+    : columns_names(columns)
     , group_size(group_size_)
 {
     children.push_back(input);

View File

@@ -18,7 +18,7 @@ namespace DB
 class LimitByBlockInputStream : public IProfilingBlockInputStream
 {
 public:
-    LimitByBlockInputStream(const BlockInputStreamPtr & input, size_t group_size_, Names columns_);
+    LimitByBlockInputStream(const BlockInputStreamPtr & input, size_t group_size_, const Names & columns);
 
     String getName() const override { return "LimitBy"; }

View File

@@ -56,7 +56,7 @@ namespace DB
   * data from sources can also be read in several threads (reading_threads)
   * for optimal performance in the presence of a fast network or disks (from where these blocks are read).
   */
-class MergingAggregatedMemoryEfficientBlockInputStream : public IProfilingBlockInputStream
+class MergingAggregatedMemoryEfficientBlockInputStream final : public IProfilingBlockInputStream
 {
 public:
     MergingAggregatedMemoryEfficientBlockInputStream(
@@ -137,7 +137,7 @@ private:
     std::exception_ptr exception;
 
     /// It is necessary to give out blocks in the order of the key (bucket_num).
     /// If the value is an empty block, you need to wait for its merge.
-    /// (This means the promise that there will be data here, which is important because the data should be given out
+    /// (This means the promise that there will be data here, which is important because the data should be given out
     /// in the order of the key - bucket_num)
     std::map<int, Block> merged_blocks;
     std::mutex merged_blocks_mutex;
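Note: marking the class final (here and for BlockInputStreamWithBackgroundThread further down) documents that the hierarchy ends here and permits devirtualization: when the static type is a final class, virtual calls through it can be resolved at compile time. A minimal sketch:

    struct IStream
    {
        virtual ~IStream() = default;
        virtual int read() = 0;
    };

    struct ConcreteStream final : IStream
    {
        int read() override { return 42; }
    };

    int drain(ConcreteStream & s)
    {
        // The compiler knows read() cannot be overridden below ConcreteStream,
        // so this call may be dispatched directly, skipping the vtable.
        return s.read();
    }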

View File

@@ -16,8 +16,8 @@ namespace ErrorCodes
 TotalsHavingBlockInputStream::TotalsHavingBlockInputStream(
-    BlockInputStreamPtr input_,
-    bool overflow_row_, ExpressionActionsPtr expression_,
+    const BlockInputStreamPtr & input_,
+    bool overflow_row_, const ExpressionActionsPtr & expression_,
     const std::string & filter_column_, TotalsMode totals_mode_, double auto_include_threshold_)
     : overflow_row(overflow_row_),
     expression(expression_), filter_column_name(filter_column_), totals_mode(totals_mode_),

View File

@@ -20,8 +20,8 @@ private:
 public:
     TotalsHavingBlockInputStream(
-        BlockInputStreamPtr input_,
-        bool overflow_row_, ExpressionActionsPtr expression_,
+        const BlockInputStreamPtr & input_,
+        bool overflow_row_, const ExpressionActionsPtr & expression_,
         const std::string & filter_column_, TotalsMode totals_mode_, double auto_include_threshold_);
 
     String getName() const override { return "TotalsHaving"; }

View File

@@ -29,13 +29,13 @@ namespace ErrorCodes
 }
 
-DataTypeArray::DataTypeArray(DataTypePtr nested_)
+DataTypeArray::DataTypeArray(const DataTypePtr & nested_)
     : enriched_nested(std::make_pair(nested_, std::make_shared<DataTypeVoid>())), nested{nested_}
 {
     offsets = std::make_shared<DataTypeNumber<ColumnArray::Offset_t>>();
 }
 
-DataTypeArray::DataTypeArray(DataTypeTraits::EnrichedDataTypePtr enriched_nested_)
+DataTypeArray::DataTypeArray(const DataTypeTraits::EnrichedDataTypePtr & enriched_nested_)
     : enriched_nested{enriched_nested_}, nested{enriched_nested.first}
 {
     offsets = std::make_shared<DataTypeNumber<ColumnArray::Offset_t>>();

View File

@@ -19,8 +19,8 @@ private:
     DataTypePtr offsets;
 
 public:
-    DataTypeArray(DataTypePtr nested_);
-    DataTypeArray(DataTypeTraits::EnrichedDataTypePtr enriched_nested_);
+    DataTypeArray(const DataTypePtr & nested_);
+    DataTypeArray(const DataTypeTraits::EnrichedDataTypePtr & enriched_nested_);
 
     std::string getName() const override
     {

View File

@@ -16,7 +16,7 @@ private:
 public:
     /// Some types could be still unknown.
-    DataTypeExpression(DataTypes argument_types_ = DataTypes(), DataTypePtr return_type_ = nullptr)
+    DataTypeExpression(const DataTypes & argument_types_ = DataTypes(), const DataTypePtr & return_type_ = nullptr)
         : argument_types(argument_types_), return_type(return_type_) {}
 
     std::string getName() const override;

View File

@@ -22,7 +22,7 @@ namespace ErrorCodes
 }
 
-DataTypeNested::DataTypeNested(NamesAndTypesListPtr nested_)
+DataTypeNested::DataTypeNested(const NamesAndTypesListPtr & nested_)
     : nested(nested_)
 {
 }

View File

@@ -17,7 +17,7 @@ private:
     NamesAndTypesListPtr nested;
 
 public:
-    DataTypeNested(NamesAndTypesListPtr nested_);
+    DataTypeNested(const NamesAndTypesListPtr & nested_);
 
     std::string getName() const override;
     const char * getFamilyName() const override { return "Nested"; }

View File

@@ -21,7 +21,7 @@ namespace ErrorCodes
 }
 
-DataTypeNullable::DataTypeNullable(DataTypePtr nested_data_type_)
+DataTypeNullable::DataTypeNullable(const DataTypePtr & nested_data_type_)
     : nested_data_type{nested_data_type_}
 {
     if (!nested_data_type->canBeInsideNullable())

View File

@@ -11,7 +11,7 @@ namespace DB
 class DataTypeNullable final : public IDataType
 {
 public:
-    DataTypeNullable(DataTypePtr nested_data_type_);
+    DataTypeNullable(const DataTypePtr & nested_data_type_);
 
     std::string getName() const override { return "Nullable(" + nested_data_type->getName() + ")"; }
     const char * getFamilyName() const override { return "Nullable"; }
     bool isNullable() const override { return true; }

View File

@@ -16,7 +16,7 @@ class DataTypeTuple final : public IDataType
 private:
     DataTypes elems;
 public:
-    DataTypeTuple(DataTypes elems_) : elems(elems_) {}
+    DataTypeTuple(const DataTypes & elems_) : elems(elems_) {}
 
     std::string getName() const override;
     const char * getFamilyName() const override { return "Tuple"; }

View File

@@ -250,7 +250,7 @@ Block DictionaryBlockInputStream<DictionaryType, Key>::fillBlock(
     data_types.reserve(keys.size());
     const DictionaryStructure& dictionaty_structure = dictionary->getStructure();
     if (data_types.empty() && dictionaty_structure.key)
-        for (const auto key : *dictionaty_structure.key)
+        for (const auto & key : *dictionaty_structure.key)
             data_types.push_back(key.type);
 
     for (const auto & column : view)

View File

@@ -220,7 +220,7 @@ bool DictionaryStructure::isKeySizeFixed() const
     if (!key)
         return true;
 
-    for (const auto key_i : * key)
+    for (const auto & key_i : *key)
         if (key_i.underlying_type == AttributeUnderlyingType::String)
             return false;
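Note: `for (const auto key_i : *key)` copies each element on every iteration, and since these key attributes carry strings, each copy can allocate; `const auto &` binds to the stored element instead. A standalone sketch (KeyAttribute is illustrative, not the ClickHouse struct):

    #include <string>
    #include <vector>

    struct KeyAttribute
    {
        std::string name;   // copying this may allocate
        bool is_string = false;
    };

    bool hasStringKey(const std::vector<KeyAttribute> & key)
    {
        // By reference: no per-iteration copy of name's heap buffer.
        for (const auto & key_i : key)
            if (key_i.is_string)
                return true;
        return false;
    }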

View File

@@ -54,7 +54,7 @@ BlockInputStreamPtr ExecutableDictionarySource::loadAll()
 /** A stream, that also runs and waits for background thread
   * (that will feed data into pipe to be read from the other side of the pipe).
   */
-class BlockInputStreamWithBackgroundThread : public IProfilingBlockInputStream
+class BlockInputStreamWithBackgroundThread final : public IProfilingBlockInputStream
 {
 public:
     BlockInputStreamWithBackgroundThread(

View File

@@ -366,7 +366,7 @@ void FlatDictionary::createAttributeImpl<String>(Attribute & attribute, const Fi
 {
     attribute.string_arena = std::make_unique<Arena>();
     auto & null_value_ref = std::get<StringRef>(attribute.null_values);
-    const String string = null_value.get<typename NearestFieldType<String>::Type>();
+    const String & string = null_value.get<typename NearestFieldType<String>::Type>();
     const auto string_in_arena = attribute.string_arena->insert(string.data(), string.size());
     null_value_ref = StringRef{string_in_arena, string.size()};
     std::get<ContainerPtrType<StringRef>>(attribute.arrays) =

View File

@@ -1018,16 +1018,20 @@ bool Aggregator::checkLimits(size_t result_size, bool & no_more_keys) const
 {
     if (!no_more_keys && params.max_rows_to_group_by && result_size > params.max_rows_to_group_by)
     {
-        if (params.group_by_overflow_mode == OverflowMode::THROW)
-            throw Exception("Limit for rows to GROUP BY exceeded: has " + toString(result_size)
-                + " rows, maximum: " + toString(params.max_rows_to_group_by),
-                ErrorCodes::TOO_MUCH_ROWS);
-        else if (params.group_by_overflow_mode == OverflowMode::BREAK)
-            return false;
-        else if (params.group_by_overflow_mode == OverflowMode::ANY)
-            no_more_keys = true;
-        else
-            throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        switch (params.group_by_overflow_mode)
+        {
+            case OverflowMode::THROW:
+                throw Exception("Limit for rows to GROUP BY exceeded: has " + toString(result_size)
+                    + " rows, maximum: " + toString(params.max_rows_to_group_by),
+                    ErrorCodes::TOO_MUCH_ROWS);
+            case OverflowMode::BREAK:
+                return false;
+            case OverflowMode::ANY:
+                no_more_keys = true;
+                break;
+        }
     }
 
     return true;

View File

@@ -37,7 +37,7 @@ Compiler::Compiler(const std::string & path_, size_t threads)
     Poco::DirectoryIterator dir_end;
     for (Poco::DirectoryIterator dir_it(path); dir_end != dir_it; ++dir_it)
     {
-        std::string name = dir_it.name();
+        const std::string & name = dir_it.name();
         if (endsWith(name, ".so"))
         {
             files.insert(name.substr(0, name.size() - 3));

View File

@@ -770,7 +770,7 @@ StoragePtr Context::getTableImpl(const String & database_name, const String & ta
 }
 
-void Context::addExternalTable(const String & table_name, StoragePtr storage)
+void Context::addExternalTable(const String & table_name, const StoragePtr & storage)
 {
     if (external_tables.end() != external_tables.find(table_name))
         throw Exception("Temporary table " + table_name + " already exists.", ErrorCodes::TABLE_ALREADY_EXISTS);
@@ -1217,7 +1217,7 @@ void Context::setZooKeeper(zkutil::ZooKeeperPtr zookeeper)
     if (shared->zookeeper)
         throw Exception("ZooKeeper client has already been set.", ErrorCodes::LOGICAL_ERROR);
 
-    shared->zookeeper = zookeeper;
+    shared->zookeeper = std::move(zookeeper);
 }
 
 zkutil::ZooKeeperPtr Context::getZooKeeper() const
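Note: setZooKeeper keeps its by-value zkutil::ZooKeeperPtr parameter (a shared_ptr) and now moves it into the member, the complementary clang-tidy idiom for sink parameters: the caller pays at most one shared_ptr copy, and the assignment becomes a pointer steal rather than an atomic refcount round trip. A sketch with a stand-in for the ZooKeeper client type:

    #include <memory>
    #include <utility>

    struct ZooKeeperClient {};
    using ZooKeeperPtr = std::shared_ptr<ZooKeeperClient>;

    class ContextLike
    {
    public:
        void setZooKeeper(ZooKeeperPtr zookeeper)
        {
            // Move from the by-value parameter on its last use:
            // no refcount increment, and the parameter is left empty.
            shared_zookeeper = std::move(zookeeper);
        }
    private:
        ZooKeeperPtr shared_zookeeper;
    };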

View File

@@ -173,7 +173,7 @@ public:
     StoragePtr tryGetExternalTable(const String & table_name) const;
     StoragePtr getTable(const String & database_name, const String & table_name) const;
     StoragePtr tryGetTable(const String & database_name, const String & table_name) const;
-    void addExternalTable(const String & table_name, StoragePtr storage);
+    void addExternalTable(const String & table_name, const StoragePtr & storage);
     StoragePtr tryRemoveExternalTable(const String & table_name);
 
     void addDatabase(const String & database_name, const DatabasePtr & database);

View File

@@ -237,7 +237,7 @@ struct ExpressionActionsChain
         ExpressionActionsPtr actions;
         Names required_output;
 
-        Step(ExpressionActionsPtr actions_ = nullptr, Names required_output_ = Names())
+        Step(const ExpressionActionsPtr & actions_ = nullptr, const Names & required_output_ = Names())
             : actions(actions_), required_output(required_output_) {}
     };

View File

@@ -153,7 +153,7 @@ void removeDuplicateColumns(NamesAndTypesList & columns)
 ExpressionAnalyzer::ExpressionAnalyzer(
     const ASTPtr & ast_,
     const Context & context_,
-    StoragePtr storage_,
+    const StoragePtr & storage_,
     const NamesAndTypesList & columns_,
     size_t subquery_depth_,
     bool do_global_)
@@ -1853,7 +1853,7 @@ struct ExpressionAnalyzer::ScopeStack
 };
 
-void ExpressionAnalyzer::getRootActions(ASTPtr ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions)
+void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions)
 {
     ScopeStack scopes(actions, settings);
     getActionsImpl(ast, no_subqueries, only_consts, scopes);
@@ -1919,7 +1919,7 @@ void ExpressionAnalyzer::getArrayJoinedColumns()
 /// Fills the array_join_result_to_source: on which columns-arrays to replicate, and how to call them after that.
-void ExpressionAnalyzer::getArrayJoinedColumnsImpl(ASTPtr ast)
+void ExpressionAnalyzer::getArrayJoinedColumnsImpl(const ASTPtr & ast)
 {
     if (typeid_cast<ASTTablesInSelectQuery *>(ast.get()))
         return;
@@ -1973,7 +1973,7 @@ void ExpressionAnalyzer::getArrayJoinedColumnsImpl(ASTPtr ast)
 }
 
-void ExpressionAnalyzer::getActionsImpl(ASTPtr ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack)
+void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack)
 {
     /// If the result of the calculation already exists in the block.
     if ((typeid_cast<ASTFunction *>(ast.get()) || typeid_cast<ASTLiteral *>(ast.get()))
@@ -2615,7 +2615,7 @@ Block ExpressionAnalyzer::getSelectSampleBlock()
     return temp_actions->getSampleBlock();
 }
 
-void ExpressionAnalyzer::getActionsBeforeAggregation(ASTPtr ast, ExpressionActionsPtr & actions, bool no_subqueries)
+void ExpressionAnalyzer::getActionsBeforeAggregation(const ASTPtr & ast, ExpressionActionsPtr & actions, bool no_subqueries)
 {
     ASTFunction * node = typeid_cast<ASTFunction *>(ast.get());
@@ -2848,7 +2848,7 @@ Names ExpressionAnalyzer::getRequiredColumns()
 }
 
-void ExpressionAnalyzer::getRequiredColumnsImpl(ASTPtr ast,
+void ExpressionAnalyzer::getRequiredColumnsImpl(const ASTPtr & ast,
     NameSet & required_columns, NameSet & ignored_names,
     const NameSet & available_joined_columns, NameSet & required_joined_columns)
 {

View File

@@ -69,7 +69,7 @@ public:
     ExpressionAnalyzer(
         const ASTPtr & ast_,
         const Context & context_,
-        StoragePtr storage_,
+        const StoragePtr & storage_,
         const NamesAndTypesList & columns_,
         size_t subquery_depth_ = 0,
         bool do_global_ = false);
@@ -269,17 +269,17 @@ private:
     void addExternalStorage(ASTPtr & subquery_or_table_name);
 
     void getArrayJoinedColumns();
-    void getArrayJoinedColumnsImpl(ASTPtr ast);
+    void getArrayJoinedColumnsImpl(const ASTPtr & ast);
 
     void addMultipleArrayJoinAction(ExpressionActionsPtr & actions) const;
     void addJoinAction(ExpressionActionsPtr & actions, bool only_types) const;
 
     struct ScopeStack;
-    void getActionsImpl(ASTPtr ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack);
+    void getActionsImpl(const ASTPtr & ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack);
 
-    void getRootActions(ASTPtr ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions);
+    void getRootActions(const ASTPtr & ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions);
 
-    void getActionsBeforeAggregation(ASTPtr ast, ExpressionActionsPtr & actions, bool no_subqueries);
+    void getActionsBeforeAggregation(const ASTPtr & ast, ExpressionActionsPtr & actions, bool no_subqueries);
 
     /** Add aggregation keys to aggregation_keys, aggregate functions to aggregate_descriptions,
       * Create a set of columns aggregated_columns resulting after the aggregation, if any,
@@ -295,7 +295,7 @@ private:
      * The set of columns available_joined_columns are the columns available from JOIN, they are not needed for reading from the main table.
      * Put in required_joined_columns the set of columns available from JOIN and needed.
      */
-    void getRequiredColumnsImpl(ASTPtr ast,
+    void getRequiredColumnsImpl(const ASTPtr & ast,
         NameSet & required_columns, NameSet & ignored_names,
        const NameSet & available_joined_columns, NameSet & required_joined_columns);

View File

@@ -167,7 +167,7 @@ void InterpreterSelectQuery::basicInit(const BlockInputStreamPtr & input)
     query_analyzer = std::make_unique<ExpressionAnalyzer>(query_ptr, context, storage, table_column_names, subquery_depth, !only_analyze);
 
     /// Save the new temporary tables in the query context
-    for (auto & it : query_analyzer->getExternalTables())
+    for (const auto & it : query_analyzer->getExternalTables())
         if (!context.tryGetExternalTable(it.first))
             context.addExternalTable(it.first, it.second);

View File

@@ -525,18 +525,22 @@ bool Join::insertFromBlock(const Block & block)
     if (!checkSizeLimits())
     {
-        if (overflow_mode == OverflowMode::THROW)
-            throw Exception("Join size limit exceeded."
-                " Rows: " + toString(getTotalRowCount()) +
-                ", limit: " + toString(max_rows) +
-                ". Bytes: " + toString(getTotalByteCount()) +
-                ", limit: " + toString(max_bytes) + ".",
-                ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
-        if (overflow_mode == OverflowMode::BREAK)
-            return false;
-        throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        switch (overflow_mode)
+        {
+            case OverflowMode::THROW:
+                throw Exception("Join size limit exceeded."
+                    " Rows: " + toString(getTotalRowCount()) +
+                    ", limit: " + toString(max_rows) +
+                    ". Bytes: " + toString(getTotalByteCount()) +
+                    ", limit: " + toString(max_bytes) + ".",
+                    ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
+            case OverflowMode::BREAK:
+                return false;
+            default:
+                throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        }
     }
 
     return true;

View File

@@ -206,7 +206,7 @@ bool ProcessListElement::tryGetQueryStreams(BlockInputStreamPtr & in, BlockOutpu
 }
 
-void ProcessList::addTemporaryTable(ProcessListElement & elem, const String & table_name, StoragePtr storage)
+void ProcessList::addTemporaryTable(ProcessListElement & elem, const String & table_name, const StoragePtr & storage)
 {
     std::lock_guard<std::mutex> lock(mutex);

View File

@@ -274,7 +274,7 @@ public:
     }
 
     /// Register temporary table. Then it is accessible by query_id and name.
-    void addTemporaryTable(ProcessListElement & elem, const String & table_name, StoragePtr storage);
+    void addTemporaryTable(ProcessListElement & elem, const String & table_name, const StoragePtr & storage);
 
     enum class CancellationCode
     {

View File

@@ -181,18 +181,22 @@ bool Set::insertFromBlock(const Block & block, bool create_ordered_set)
     if (!checkSetSizeLimits())
     {
-        if (overflow_mode == OverflowMode::THROW)
-            throw Exception("IN-set size exceeded."
-                " Rows: " + toString(data.getTotalRowCount()) +
-                ", limit: " + toString(max_rows) +
-                ". Bytes: " + toString(data.getTotalByteCount()) +
-                ", limit: " + toString(max_bytes) + ".",
-                ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
-        if (overflow_mode == OverflowMode::BREAK)
-            return false;
-        throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        switch (overflow_mode)
+        {
+            case OverflowMode::THROW:
+                throw Exception("IN-set size exceeded."
+                    " Rows: " + toString(data.getTotalRowCount()) +
+                    ", limit: " + toString(max_rows) +
+                    ". Bytes: " + toString(data.getTotalByteCount()) +
+                    ", limit: " + toString(max_bytes) + ".",
+                    ErrorCodes::SET_SIZE_LIMIT_EXCEEDED);
+            case OverflowMode::BREAK:
+                return false;
+            default:
+                throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
+        }
     }
 
     return true;

View File

@@ -223,7 +223,7 @@ PKCondition::PKCondition(
     const Context & context,
     const NamesAndTypesList & all_columns,
     const SortDescription & sort_descr_,
-    ExpressionActionsPtr pk_expr_)
+    const ExpressionActionsPtr & pk_expr_)
     : sort_descr(sort_descr_), pk_expr(pk_expr_), prepared_sets(query_info.sets)
 {
     for (size_t i = 0; i < sort_descr.size(); ++i)

View File

@@ -207,7 +207,7 @@ public:
         const Context & context,
         const NamesAndTypesList & all_columns,
         const SortDescription & sort_descr,
-        ExpressionActionsPtr pk_expr);
+        const ExpressionActionsPtr & pk_expr);
 
     /// Whether the condition is feasible in the key range.
     /// left_pk and right_pk must contain all fields in the sort_descr in the appropriate order.