Better [#CLICKHOUSE-2].

This commit is contained in:
Alexey Milovidov 2017-12-02 00:13:25 +03:00
parent 1efef27af9
commit acd78e4891
51 changed files with 155 additions and 167 deletions
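
Note: nearly every hunk in this commit applies one of two mechanical cleanups. Parameters that an interface requires but an implementation deliberately ignores get their names commented out (or dropped), which silences -Wunused-parameter while keeping the signature self-documenting; and parameters that no caller or callee actually uses (path, num_shards, current_time, only_types) are removed outright. A minimal sketch of the first idiom, using a hypothetical interface rather than code from this diff:

#include <cstddef>

struct IExample
{
    virtual void add(const char * data, size_t length) = 0;
    virtual ~IExample() = default;
};

struct CountingExample : IExample
{
    size_t count = 0;

    /// 'data' and 'length' are required by the interface but unused here;
    /// commenting out the names suppresses -Wunused-parameter without
    /// obscuring what the parameters mean.
    void add(const char * /*data*/, size_t /*length*/) override
    {
        ++count;
    }
};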

View File

@ -30,7 +30,7 @@ class AggregateFunctionCount final : public INullaryAggregateFunction<AggregateF
public:
String getName() const override { return "count"; }
void setArguments(const DataTypes & arguments) override
void setArguments(const DataTypes & /*arguments*/) override
{
/// You may pass some arguments. All of them are ignored.
}
@ -45,7 +45,7 @@ public:
++data(place).count;
}
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
{
data(place).count += data(rhs).count;
}
@ -86,7 +86,7 @@ public:
return std::make_shared<DataTypeUInt64>();
}
void addImpl(AggregateDataPtr place, const IColumn & column, size_t row_num, Arena * arena) const
void addImpl(AggregateDataPtr place, const IColumn & column, size_t row_num, Arena *) const
{
data(place).count += !static_cast<const ColumnNullable &>(column).isNullAt(row_num);
}
@ -97,7 +97,7 @@ public:
throw Exception("Not Nullable argument passed to aggregate function count", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
{
data(place).count += data(rhs).count;
}
@ -147,7 +147,7 @@ public:
is_nullable[i] = arguments[i]->isNullable() || arguments[i]->isNull();
}
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
{
for (size_t i = 0; i < number_of_arguments; ++i)
if (is_nullable[i] && static_cast<const ColumnNullable &>(*columns[i]).isNullAt(row_num))
@ -167,7 +167,7 @@ public:
return &addFree;
}
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
{
data(place).count += data(rhs).count;
}
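
The count aggregate's state logic above is tiny; a free-standing sketch of the add/merge pattern (a hypothetical simplification, not the real AggregateFunctionCount):

#include <cstdint>

struct CountState
{
    uint64_t count = 0;
};

/// One more row seen; the row's values are irrelevant to count, so none are passed.
inline void add(CountState & place)
{
    ++place.count;
}

/// Combine two partial aggregation states, e.g. produced by different threads.
inline void merge(CountState & place, const CountState & rhs)
{
    place.count += rhs.count;
}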

View File

@ -26,12 +26,12 @@ public:
}
/// Accumulate a value.
void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override final
void add(AggregateDataPtr place, const IColumn ** /*columns*/, size_t /*row_num*/, Arena *) const override final
{
getDerived().addImpl(place);
}
static void addFree(const IAggregateFunction * that, AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *)
static void addFree(const IAggregateFunction * that, AggregateDataPtr place, const IColumn ** /*columns*/, size_t /*row_num*/, Arena *)
{
return static_cast<const Derived &>(*that).addImpl(place);
}

View File

@ -222,7 +222,7 @@ StringRef ColumnAggregateFunction::getDataAt(size_t n) const
return StringRef(reinterpret_cast<const char *>(&getData()[n]), sizeof(getData()[n]));
}
void ColumnAggregateFunction::insertData(const char * pos, size_t length)
void ColumnAggregateFunction::insertData(const char * pos, size_t /*length*/)
{
getData().push_back(*reinterpret_cast<const AggregateDataPtr *>(pos));
}
@ -281,12 +281,12 @@ void ColumnAggregateFunction::insertDefault()
function->create(getData().back());
}
StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const
StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t, Arena &, const char *&) const
{
throw Exception("Method serializeValueIntoArena is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
const char * ColumnAggregateFunction::deserializeAndInsertFromArena(const char * pos)
const char * ColumnAggregateFunction::deserializeAndInsertFromArena(const char *)
{
throw Exception("Method deserializeAndInsertFromArena is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
@ -353,7 +353,7 @@ Columns ColumnAggregateFunction::scatter(IColumn::ColumnIndex num_columns, const
return columns;
}
void ColumnAggregateFunction::getPermutation(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const
void ColumnAggregateFunction::getPermutation(bool /*reverse*/, size_t /*limit*/, int /*nan_direction_hint*/, IColumn::Permutation & res) const
{
size_t s = getData().size();
res.resize(s);
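
The body truncated above presumably fills res with the identity permutation: aggregate-function states have no meaningful sort order, so reverse, limit and nan_direction_hint can all be ignored. Schematically (a free-standing sketch, not the member function itself):

#include <cstddef>
#include <vector>

/// Identity permutation: row i stays at position i.
std::vector<size_t> identityPermutation(size_t s)
{
    std::vector<size_t> res(s);
    for (size_t i = 0; i < s; ++i)
        res[i] = i;
    return res;
}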

View File

@ -111,7 +111,7 @@ struct ColumnFixedString::less
}
};
void ColumnFixedString::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
{
size_t s = size();
res.resize(s);

View File

@ -113,12 +113,12 @@ void ColumnNullable::get(size_t n, Field & res) const
nested_column->get(n, res);
}
StringRef ColumnNullable::getDataAt(size_t n) const
StringRef ColumnNullable::getDataAt(size_t /*n*/) const
{
throw Exception{"Method getDataAt is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED};
}
void ColumnNullable::insertData(const char * pos, size_t length)
void ColumnNullable::insertData(const char * /*pos*/, size_t /*length*/)
{
throw Exception{"Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED};
}

View File

@ -181,7 +181,7 @@ struct ColumnString::less
}
};
void ColumnString::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
void ColumnString::getPermutation(bool reverse, size_t limit, int /*nan_direction_hint*/, Permutation & res) const
{
size_t s = offsets.size();
res.resize(s);

View File

@ -41,12 +41,12 @@ void ColumnTuple::get(size_t n, Field & res) const
columns[i]->get(n, res_arr[i]);
}
StringRef ColumnTuple::getDataAt(size_t n) const
StringRef ColumnTuple::getDataAt(size_t) const
{
throw Exception("Method getDataAt is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void ColumnTuple::insertData(const char * pos, size_t length)
void ColumnTuple::insertData(const char *, size_t)
{
throw Exception("Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}

View File

@ -147,16 +147,16 @@ namespace
struct NoResultOffsetsBuilder
{
explicit NoResultOffsetsBuilder(IColumn::Offsets_t * res_offsets_) {}
void reserve(ssize_t result_size_hint, size_t src_size) {}
void insertOne(size_t array_size) {}
explicit NoResultOffsetsBuilder(IColumn::Offsets_t *) {}
void reserve(ssize_t, size_t) {}
void insertOne(size_t) {}
template <size_t SIMD_BYTES>
void insertChunk(
const IColumn::Offset_t * src_offsets_pos,
bool first,
IColumn::Offset_t chunk_offset,
size_t chunk_size)
const IColumn::Offset_t *,
bool,
IColumn::Offset_t,
size_t)
{
}
};
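
NoResultOffsetsBuilder is the no-op half of a small policy pattern: the SIMD filter loop is templated on a builder type, so when the caller does not need result offsets these empty methods inline away, and with the parameter names gone the stubs no longer trigger unused-parameter warnings. A simplified sketch of the pattern (hypothetical builder and loop, not the actual filter implementation):

#include <cstddef>
#include <vector>

struct RecordingBuilder
{
    std::vector<size_t> offsets;
    void insertOne(size_t pos) { offsets.push_back(pos); }
};

struct NoOpBuilder
{
    void insertOne(size_t) {}   /// unnamed: the stub intentionally ignores it
};

/// The same loop instantiates with either builder; with NoOpBuilder the
/// insertOne calls compile to nothing.
template <typename Builder>
size_t countFiltered(const std::vector<char> & filt, Builder & builder)
{
    size_t count = 0;
    for (size_t i = 0; i < filt.size(); ++i)
        if (filt[i])
        {
            ++count;
            builder.insertOne(i);
        }
    return count;
}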

View File

@ -31,35 +31,35 @@ public:
void popBack(size_t n) override { s -= n; }
size_t byteSize() const override { return 0; }
size_t allocatedBytes() const override { return 0; }
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override { return 0; }
int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
Field operator[](size_t n) const override { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
void get(size_t n, Field & res) const override { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); };
void insert(const Field & x) override { throw Exception("Cannot insert element into " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
StringRef getDataAt(size_t n) const override { throw Exception("Method getDataAt is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
void insertData(const char * pos, size_t length) override { throw Exception("Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
Field operator[](size_t) const override { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
void get(size_t, Field &) const override { throw Exception("Cannot get value from " + getName(), ErrorCodes::NOT_IMPLEMENTED); };
void insert(const Field &) override { throw Exception("Cannot insert element into " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
StringRef getDataAt(size_t) const override { throw Exception("Method getDataAt is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
void insertData(const char *, size_t) override { throw Exception("Method insertData is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED); }
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override
StringRef serializeValueIntoArena(size_t /*n*/, Arena & /*arena*/, char const *& /*begin*/) const override
{
throw Exception("Method serializeValueIntoArena is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
const char * deserializeAndInsertFromArena(const char * pos) override
const char * deserializeAndInsertFromArena(const char * /*pos*/) override
{
throw Exception("Method deserializeAndInsertFromArena is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void updateHashWithValue(size_t n, SipHash & hash) const override
void updateHashWithValue(size_t /*n*/, SipHash & /*hash*/) const override
{
throw Exception("Method updateHashWithValue is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override
void insertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
{
s += length;
}
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override
ColumnPtr filter(const Filter & filt, ssize_t /*result_size_hint*/) const override
{
return cloneDummy(countBytesInFilter(filt));
}
@ -72,7 +72,7 @@ public:
return cloneDummy(limit ? std::min(s, limit) : s);
}
void getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const override
void getPermutation(bool /*reverse*/, size_t /*limit*/, int /*nan_direction_hint*/, Permutation & res) const override
{
res.resize(s);
for (size_t i = 0; i < s; ++i)
@ -108,7 +108,7 @@ public:
throw Exception("Method gather is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
void getExtremes(Field & min, Field & max) const override
void getExtremes(Field &, Field &) const override
{
throw Exception("Method getExtremes is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}

View File

@ -267,33 +267,26 @@ private:
{
Handler(Self & parent_) : parent(parent_) {}
template <StreamUnionMode mode2 = mode>
void onBlock(Block & block, size_t thread_num,
typename std::enable_if<mode2 == StreamUnionMode::Basic>::type * = nullptr)
void onBlock(Block & block, size_t /*thread_num*/)
{
//std::cerr << "pushing block\n";
parent.output_queue.push(Payload(block));
}
template <StreamUnionMode mode2 = mode>
void onBlock(Block & block, BlockExtraInfo & extra_info, size_t thread_num,
typename std::enable_if<mode2 == StreamUnionMode::ExtraInfo>::type * = nullptr)
void onBlock(Block & block, BlockExtraInfo & extra_info, size_t /*thread_num*/)
{
//std::cerr << "pushing block with extra info\n";
parent.output_queue.push(Payload(block, extra_info));
}
void onFinish()
{
//std::cerr << "pushing end\n";
parent.output_queue.push(Payload());
}
void onFinishThread(size_t thread_num)
void onFinishThread(size_t /*thread_num*/)
{
}
void onException(std::exception_ptr & exception, size_t thread_num)
void onException(std::exception_ptr & exception, size_t /*thread_num*/)
{
//std::cerr << "pushing exception\n";
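
The onBlock overloads above live in a class templated on the stream-union mode; member functions of a class template are only instantiated when actually called, so the old std::enable_if dispatch was unnecessary and plain overloads suffice. A minimal illustration of that principle (hypothetical types, not the real stream classes):

#include <queue>

struct Block {};
struct BlockExtraInfo {};

/// The payload's shape depends on the mode.
template <bool with_extra_info>
struct Payload;

template <>
struct Payload<false> { Block block; };

template <>
struct Payload<true> { Block block; BlockExtraInfo info; };

template <bool with_extra_info>
struct Handler
{
    std::queue<Payload<with_extra_info>> queue;

    /// Member functions of a class template are instantiated lazily, so the
    /// overload that would not compile for a given mode is harmless as long
    /// as it is never called -- no enable_if needed.
    void onBlock(Block & block) { queue.push({block}); }
    void onBlock(Block & block, BlockExtraInfo & info) { queue.push({block, info}); }
};

int main()
{
    Handler<false> h;
    Block b;
    h.onBlock(b);   /// fine; the two-argument overload is never instantiated
}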

View File

@ -38,7 +38,7 @@ try
ExpressionAnalyzer analyzer(ast, context, {}, {NameAndTypePair("number", std::make_shared<DataTypeUInt64>())});
ExpressionActionsChain chain;
analyzer.appendSelect(chain, false);
analyzer.appendProjectResult(chain, false);
analyzer.appendProjectResult(chain);
chain.finalize();
ExpressionActionsPtr expression = chain.getLastActions();

View File

@ -44,7 +44,7 @@ try
ExpressionAnalyzer analyzer(ast, context, {}, {NameAndTypePair("number", std::make_shared<DataTypeUInt64>())});
ExpressionActionsChain chain;
analyzer.appendSelect(chain, false);
analyzer.appendProjectResult(chain, false);
analyzer.appendProjectResult(chain);
chain.finalize();
ExpressionActionsPtr expression = chain.getLastActions();

View File

@ -61,7 +61,7 @@ try
ExpressionAnalyzer analyzer(ast, context, {}, {NameAndTypePair("number", std::make_shared<DataTypeUInt64>())});
ExpressionActionsChain chain;
analyzer.appendSelect(chain, false);
analyzer.appendProjectResult(chain, false);
analyzer.appendProjectResult(chain);
chain.finalize();
ExpressionActionsPtr expression = chain.getLastActions();

View File

@ -307,7 +307,7 @@ void DatabaseOrdinary::createTable(
void DatabaseOrdinary::removeTable(
const Context & context,
const Context & /*context*/,
const String & table_name)
{
StoragePtr res = detachTable(table_name);
@ -385,7 +385,7 @@ void DatabaseOrdinary::renameTable(
time_t DatabaseOrdinary::getTableMetadataModificationTime(
const Context & context,
const Context & /*context*/,
const String & table_name)
{
String table_metadata_path = getTableMetadataPath(path, table_name);
@ -403,7 +403,7 @@ time_t DatabaseOrdinary::getTableMetadataModificationTime(
ASTPtr DatabaseOrdinary::getCreateQuery(
const Context & context,
const Context & /*context*/,
const String & table_name) const
{
ASTPtr ast = getCreateQueryImpl(path, table_name);

View File

@ -362,7 +362,7 @@ private:
for (const auto row : outdated_keys[key])
out[row] = static_cast<OutputType>(attribute_array[cell_idx]);
},
[&](const StringRef key, const size_t cell_idx)
[&](const StringRef key, const size_t)
{
for (const auto row : outdated_keys[key])
out[row] = get_default(row);
@ -492,7 +492,7 @@ private:
map[copied_key] = copied_value;
total_length += (attribute_value.size + 1) * outdated_keys[key].size();
},
[&](const StringRef key, const size_t cell_idx) {
[&](const StringRef key, const size_t) {
for (const auto row : outdated_keys[key])
total_length += get_default(row).size + 1;
});

View File

@ -913,7 +913,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants)
#define M(NAME) \
else if (data_variants.type == AggregatedDataVariants::Type::NAME) \
writeToTemporaryFileImpl(data_variants, *data_variants.NAME, block_out, path);
writeToTemporaryFileImpl(data_variants, *data_variants.NAME, block_out);
if (false) {}
APPLY_FOR_VARIANTS_TWO_LEVEL(M)
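
This M(NAME) block is the usual X-macro dispatch over aggregation-variant types: the APPLY_FOR_VARIANTS_* macro expands one else-if branch per variant, and the preceding if (false) {} gives the first expanded else if something to chain onto. A self-contained sketch of the pattern (hypothetical variant names):

#include <iostream>

static void handle_key8()  { std::cout << "key8\n"; }
static void handle_key16() { std::cout << "key16\n"; }

enum class Type { key8, key16 };

#define APPLY_FOR_VARIANTS(M) \
    M(key8) \
    M(key16)

void dispatch(Type type)
{
#define M(NAME) \
    else if (type == Type::NAME) \
        handle_##NAME();

    if (false) {}   /// anchor so every expanded branch can start with 'else if'
    APPLY_FOR_VARIANTS(M)
#undef M
}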
@ -987,8 +987,7 @@ template <typename Method>
void Aggregator::writeToTemporaryFileImpl(
AggregatedDataVariants & data_variants,
Method & method,
IBlockOutputStream & out,
const String & path)
IBlockOutputStream & out)
{
size_t max_temporary_block_size_rows = 0;
size_t max_temporary_block_size_bytes = 0;
@ -1239,7 +1238,7 @@ Block Aggregator::prepareBlockAndFillWithoutKey(AggregatedDataVariants & data_va
ColumnPlainPtrs & key_columns,
AggregateColumnsData & aggregate_columns,
ColumnPlainPtrs & final_aggregate_columns,
const Sizes & key_sizes,
const Sizes & /*key_sizes*/,
bool final)
{
if (data_variants.type == AggregatedDataVariants::Type::without_key || params.overflow_row)
@ -1282,7 +1281,7 @@ Block Aggregator::prepareBlockAndFillSingleLevel(AggregatedDataVariants & data_v
ColumnPlainPtrs & key_columns,
AggregateColumnsData & aggregate_columns,
ColumnPlainPtrs & final_aggregate_columns,
const Sizes & key_sizes,
const Sizes & /*key_sizes*/,
bool final)
{
#define M(NAME) \

View File

@ -1238,8 +1238,7 @@ protected:
void writeToTemporaryFileImpl(
AggregatedDataVariants & data_variants,
Method & method,
IBlockOutputStream & out,
const String & path);
IBlockOutputStream & out);
public:
/// Templates that are instantiated by dynamic code compilation - see SpecializedAggregator.h

View File

@ -2586,7 +2586,7 @@ bool ExpressionAnalyzer::appendOrderBy(ExpressionActionsChain & chain, bool only
return true;
}
void ExpressionAnalyzer::appendProjectResult(DB::ExpressionActionsChain & chain, bool only_types) const
void ExpressionAnalyzer::appendProjectResult(ExpressionActionsChain & chain) const
{
assertSelect();

View File

@ -111,7 +111,7 @@ public:
void appendSelect(ExpressionActionsChain & chain, bool only_types);
bool appendOrderBy(ExpressionActionsChain & chain, bool only_types);
/// Deletes all columns except those mentioned by SELECT, arranges the remaining columns and renames them to aliases.
void appendProjectResult(ExpressionActionsChain & chain, bool only_types) const;
void appendProjectResult(ExpressionActionsChain & chain) const;
/// If `ast` is not a SELECT query, just gets all the actions to evaluate the expression.
/// If project_result is set, only the calculated values, in the desired order and renamed to aliases, remain in the output block.

View File

@ -325,7 +325,7 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
/// add new loadable object or update an existing version
if (object_it == std::end(loadable_objects))
loadable_objects.emplace(name, LoadableInfo{std::move(object_ptr), config_path});
loadable_objects.emplace(name, LoadableInfo{std::move(object_ptr), config_path, {}});
else
{
if (object_it->second.loadable)
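
The extra {} in the emplace above suggests LoadableInfo gained a member in this commit; with brace (aggregate) initialization every member needs an initializer, and a trailing {} value-initializes the new one. A generic illustration (hypothetical struct, not the real LoadableInfo):

#include <memory>
#include <string>
#include <utility>

struct Object {};

struct LoadableInfoLike
{
    std::shared_ptr<Object> loadable;
    std::string origin;
    std::string extra;   /// newly added member
};

int main()
{
    auto object = std::make_shared<Object>();
    /// The trailing {} value-initializes 'extra' (an empty string here).
    LoadableInfoLike info{std::move(object), "config.xml", {}};
    return info.extra.empty() ? 0 : 1;
}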

View File

@ -19,7 +19,7 @@ InterpreterRenameQuery::InterpreterRenameQuery(const ASTPtr & query_ptr_, Contex
struct RenameDescription
{
RenameDescription(const ASTRenameQuery::Element & elem, const String & path, const String & current_database) :
RenameDescription(const ASTRenameQuery::Element & elem, const String & current_database) :
from_database_name(elem.from.database.empty() ? current_database : elem.from.database),
from_table_name(elem.from.table),
to_database_name(elem.to.database.empty() ? current_database : elem.to.database),
@ -73,7 +73,7 @@ BlockIO InterpreterRenameQuery::execute()
for (const auto & elem : rename.elements)
{
descriptions.emplace_back(elem, path, current_database);
descriptions.emplace_back(elem, current_database);
UniqueTableName from(descriptions.back().from_database_name, descriptions.back().from_table_name);
UniqueTableName to(descriptions.back().to_database_name, descriptions.back().to_table_name);

View File

@ -508,7 +508,7 @@ void InterpreterSelectQuery::executeSingleQuery()
before_order_and_select = chain.getLastActions();
chain.addStep();
query_analyzer->appendProjectResult(chain, !second_stage);
query_analyzer->appendProjectResult(chain);
final_projection = chain.getLastActions();
chain.finalize();

View File

@ -558,7 +558,7 @@ namespace
struct Adder<ASTTableJoin::Kind::Left, ASTTableJoin::Strictness::Any, Map>
{
static void addFound(const typename Map::const_iterator & it, size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets,
size_t /*i*/, IColumn::Filter * /*filter*/, IColumn::Offset_t & /*current_offset*/, IColumn::Offsets_t * /*offsets*/,
size_t num_columns_to_skip)
{
for (size_t j = 0; j < num_columns_to_add; ++j)
@ -566,7 +566,7 @@ namespace
}
static void addNotFound(size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets)
size_t /*i*/, IColumn::Filter * /*filter*/, IColumn::Offset_t & /*current_offset*/, IColumn::Offsets_t * /*offsets*/)
{
for (size_t j = 0; j < num_columns_to_add; ++j)
added_columns[j]->insertDefault();
@ -577,7 +577,7 @@ namespace
struct Adder<ASTTableJoin::Kind::Inner, ASTTableJoin::Strictness::Any, Map>
{
static void addFound(const typename Map::const_iterator & it, size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & /*current_offset*/, IColumn::Offsets_t * /*offsets*/,
size_t num_columns_to_skip)
{
(*filter)[i] = 1;
@ -586,8 +586,8 @@ namespace
added_columns[j]->insertFrom(*it->second.block->getByPosition(num_columns_to_skip + j).column.get(), it->second.row_num);
}
static void addNotFound(size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets)
static void addNotFound(size_t /*num_columns_to_add*/, ColumnPlainPtrs & /*added_columns*/,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & /*current_offset*/, IColumn::Offsets_t * /*offsets*/)
{
(*filter)[i] = 0;
}
@ -597,7 +597,7 @@ namespace
struct Adder<KIND, ASTTableJoin::Strictness::All, Map>
{
static void addFound(const typename Map::const_iterator & it, size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets,
size_t i, IColumn::Filter * /*filter*/, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets,
size_t num_columns_to_skip)
{
size_t rows_joined = 0;
@ -614,7 +614,7 @@ namespace
}
static void addNotFound(size_t num_columns_to_add, ColumnPlainPtrs & added_columns,
size_t i, IColumn::Filter * filter, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets)
size_t i, IColumn::Filter * /*filter*/, IColumn::Offset_t & current_offset, IColumn::Offsets_t * offsets)
{
if (KIND == ASTTableJoin::Kind::Inner)
{
@ -633,7 +633,7 @@ namespace
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map, bool has_null_map>
void NO_INLINE joinBlockImplTypeCase(
Block & block, const Map & map, size_t rows, const ConstColumnPlainPtrs & key_columns, size_t keys_size, const Sizes & key_sizes,
const Map & map, size_t rows, const ConstColumnPlainPtrs & key_columns, size_t keys_size, const Sizes & key_sizes,
size_t num_columns_to_add, size_t num_columns_to_skip, ColumnPlainPtrs & added_columns, ConstNullMapPtr null_map,
std::unique_ptr<IColumn::Filter> & filter,
IColumn::Offset_t & current_offset, std::unique_ptr<IColumn::Offsets_t> & offsets_to_replicate)
@ -667,18 +667,18 @@ namespace
template <ASTTableJoin::Kind KIND, ASTTableJoin::Strictness STRICTNESS, typename KeyGetter, typename Map>
void joinBlockImplType(
Block & block, const Map & map, size_t rows, const ConstColumnPlainPtrs & key_columns, size_t keys_size, const Sizes & key_sizes,
const Map & map, size_t rows, const ConstColumnPlainPtrs & key_columns, size_t keys_size, const Sizes & key_sizes,
size_t num_columns_to_add, size_t num_columns_to_skip, ColumnPlainPtrs & added_columns, ConstNullMapPtr null_map,
std::unique_ptr<IColumn::Filter> & filter,
IColumn::Offset_t & current_offset, std::unique_ptr<IColumn::Offsets_t> & offsets_to_replicate)
{
if (null_map)
joinBlockImplTypeCase<KIND, STRICTNESS, KeyGetter, Map, true>(
block, map, rows, key_columns, keys_size, key_sizes, num_columns_to_add, num_columns_to_skip,
map, rows, key_columns, keys_size, key_sizes, num_columns_to_add, num_columns_to_skip,
added_columns, null_map, filter, current_offset, offsets_to_replicate);
else
joinBlockImplTypeCase<KIND, STRICTNESS, KeyGetter, Map, false>(
block, map, rows, key_columns, keys_size, key_sizes, num_columns_to_add, num_columns_to_skip,
map, rows, key_columns, keys_size, key_sizes, num_columns_to_add, num_columns_to_skip,
added_columns, null_map, filter, current_offset, offsets_to_replicate);
}
}
@ -777,7 +777,7 @@ void Join::joinBlockImpl(Block & block, const Maps & maps) const
#define M(TYPE) \
case Join::Type::TYPE: \
joinBlockImplType<KIND, STRICTNESS, typename KeyGetterForType<Join::Type::TYPE>::Type>(\
block, *maps.TYPE, rows, key_columns, keys_size, key_sizes, \
*maps.TYPE, rows, key_columns, keys_size, key_sizes, \
num_columns_to_add, num_columns_to_skip, added_columns, null_map, \
filter, current_offset, offsets_to_replicate); \
break;

View File

@ -50,13 +50,13 @@ void QuotaForInterval::initFromConfig(const String & config_elem, time_t duratio
void QuotaForInterval::checkExceeded(time_t current_time, const String & quota_name, const String & user_name)
{
updateTime(current_time);
check(max.queries, used.queries, current_time, quota_name, user_name, "Queries");
check(max.errors, used.errors, current_time, quota_name, user_name, "Errors");
check(max.result_rows, used.result_rows, current_time, quota_name, user_name, "Total result rows");
check(max.result_bytes, used.result_bytes, current_time, quota_name, user_name, "Total result bytes");
check(max.read_rows, used.read_rows, current_time, quota_name, user_name, "Total rows read");
check(max.read_bytes, used.read_bytes, current_time, quota_name, user_name, "Total bytes read");
check(max.execution_time_usec / 1000000, used.execution_time_usec / 1000000, current_time, quota_name, user_name, "Total execution time");
check(max.queries, used.queries, quota_name, user_name, "Queries");
check(max.errors, used.errors, quota_name, user_name, "Errors");
check(max.result_rows, used.result_rows, quota_name, user_name, "Total result rows");
check(max.result_bytes, used.result_bytes, quota_name, user_name, "Total result bytes");
check(max.read_rows, used.read_rows, quota_name, user_name, "Total rows read");
check(max.read_bytes, used.read_bytes, quota_name, user_name, "Total bytes read");
check(max.execution_time_usec / 1000000, used.execution_time_usec / 1000000, quota_name, user_name, "Total execution time");
}
String QuotaForInterval::toString() const
@ -131,7 +131,7 @@ void QuotaForInterval::updateTime(time_t current_time)
}
void QuotaForInterval::check(
size_t max_amount, size_t used_amount, time_t current_time,
size_t max_amount, size_t used_amount,
const String & quota_name, const String & user_name, const char * resource_name)
{
if (max_amount && used_amount > max_amount)

View File

@ -153,7 +153,7 @@ struct QuotaForInterval
private:
/// Reset counters of used resources, if interval for quota is expired.
void updateTime(time_t current_time);
void check(size_t max_amount, size_t used_amount, time_t current_time,
void check(size_t max_amount, size_t used_amount,
const String & quota_name, const String & user_name, const char * resource_name);
};

View File

@ -17,7 +17,6 @@ namespace DB
template <typename T>
IColumn::Selector createBlockSelector(
const IColumn & column,
size_t num_shards,
const std::vector<UInt64> & slots)
{
const auto total_weight = slots.size();
@ -55,13 +54,13 @@ IColumn::Selector createBlockSelector(
/// Explicit instantiations to avoid code bloat in headers.
template IColumn::Selector createBlockSelector<UInt8>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt16>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt32>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt64>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int8>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int16>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int32>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int64>(const IColumn & column, size_t num_shards, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt8>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt16>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt32>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<UInt64>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int8>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int16>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int32>(const IColumn & column, const std::vector<UInt64> & slots);
template IColumn::Selector createBlockSelector<Int64>(const IColumn & column, const std::vector<UInt64> & slots);
}
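
The dropped num_shards argument was redundant: as the body shows (total_weight = slots.size()), the slot table alone determines how a sharding-key value maps to a shard. A rough free-standing sketch of the weighted-slot idea (a hypothetical simplification, not the actual createBlockSelector):

#include <cstddef>
#include <cstdint>
#include <vector>

/// Map each key value to a shard through a slot table whose length is the
/// total shard weight; heavier shards own proportionally more slots.
std::vector<uint64_t> selectShards(const std::vector<int64_t> & keys,
                                   const std::vector<uint64_t> & slot_to_shard)
{
    const auto total_weight = static_cast<int64_t>(slot_to_shard.size());
    std::vector<uint64_t> selector(keys.size());
    for (size_t i = 0; i < keys.size(); ++i)
    {
        int64_t slot = keys[i] % total_weight;
        if (slot < 0)            /// keep the remainder non-negative for signed keys
            slot += total_weight;
        selector[i] = slot_to_shard[static_cast<size_t>(slot)];
    }
    return selector;
}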

View File

@ -21,7 +21,6 @@ namespace DB
template <typename T>
IColumn::Selector createBlockSelector(
const IColumn & column,
size_t num_shards,
const std::vector<UInt64> & slots);
}

View File

@ -57,7 +57,7 @@ int main(int argc, char ** argv)
ExpressionAnalyzer analyzer(ast, context, {}, columns);
ExpressionActionsChain chain;
analyzer.appendSelect(chain, false);
analyzer.appendProjectResult(chain, false);
analyzer.appendProjectResult(chain);
chain.finalize();
ExpressionActionsPtr expression = chain.getLastActions();

View File

@ -291,12 +291,11 @@ IColumn::Selector DistributedBlockOutputStream::createSelector(Block block)
{
storage.getShardingKeyExpr()->execute(block);
const auto & key_column = block.getByName(storage.getShardingKeyColumnName());
size_t num_shards = cluster->getShardsInfo().size();
const auto & slot_to_shard = cluster->getSlotToShard();
#define CREATE_FOR_TYPE(TYPE) \
if (typeid_cast<const DataType ## TYPE *>(key_column.type.get())) \
return createBlockSelector<TYPE>(*key_column.column, num_shards, slot_to_shard);
return createBlockSelector<TYPE>(*key_column.column, slot_to_shard);
CREATE_FOR_TYPE(UInt8)
CREATE_FOR_TYPE(UInt16)

View File

@ -8,7 +8,7 @@ namespace DB
AllMergeSelector::PartsInPartition AllMergeSelector::select(
const Partitions & partitions,
const size_t max_total_size_to_merge)
const size_t /*max_total_size_to_merge*/)
{
size_t min_partition_size = 0;
Partitions::const_iterator best_partition;

View File

@ -325,7 +325,7 @@ private:
};
BlockOutputStreamPtr StorageBuffer::write(const ASTPtr & query, const Settings & settings)
BlockOutputStreamPtr StorageBuffer::write(const ASTPtr & /*query*/, const Settings & /*settings*/)
{
return std::make_shared<BufferBlockOutputStream>(*this);
}
@ -365,7 +365,7 @@ void StorageBuffer::shutdown()
*
* This kind of race condition makes it very hard to implement proper tests.
*/
bool StorageBuffer::optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context)
bool StorageBuffer::optimize(const ASTPtr & /*query*/, const ASTPtr & partition, bool final, bool deduplicate, const Context & /*context*/)
{
if (partition)
throw Exception("Partition cannot be specified when optimizing table of type Buffer", ErrorCodes::NOT_IMPLEMENTED);

View File

@ -70,7 +70,7 @@ public:
void shutdown() override;
bool optimize(const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context) override;
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override { name = new_table_name; }
void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }
bool supportsSampling() const override { return true; }
bool supportsPrewhere() const override { return false; }

View File

@ -267,11 +267,11 @@ void StorageCatBoostPool::createSampleBlockAndColumns()
}
BlockInputStreams StorageCatBoostPool::read(const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
QueryProcessingStage::Enum & /*processed_stage*/,
size_t max_block_size,
unsigned threads)
unsigned /*threads*/)
{
auto stream = std::make_shared<CatBoostDatasetBlockInputStream>(
data_description_file_name, "TSV", sample_block, context, max_block_size);

View File

@ -69,11 +69,11 @@ StorageDictionary::StorageDictionary(
BlockInputStreams StorageDictionary::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned threads)
const unsigned /*threads*/)
{
processed_stage = QueryProcessingStage::FetchColumns;
auto dictionary = context.getExternalDictionaries().getDictionary(dictionary_name);

View File

@ -184,12 +184,12 @@ StoragePtr StorageDistributed::createWithOwnCluster(
BlockInputStreams StorageDistributed::read(
const Names & column_names,
const Names & /*column_names*/,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned num_streams)
const size_t /*max_block_size*/,
const unsigned /*num_streams*/)
{
auto cluster = getCluster();

View File

@ -65,7 +65,7 @@ public:
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;
void drop() override {}
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override { name = new_table_name; }
void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }
/// in the sub-tables, you need to manually add and delete columns
/// the structure of the sub-table is not checked
void alter(const AlterCommands & params, const String & database_name, const String & table_name, const Context & context) override;

View File

@ -143,7 +143,7 @@ static Names extractColumnNames(const ASTPtr & node)
* </default>
* </graphite_rollup>
*/
static void appendGraphitePattern(const Context & context,
static void appendGraphitePattern(
const Poco::Util::AbstractConfiguration & config, const String & config_element, Graphite::Patterns & patterns)
{
Graphite::Pattern pattern;
@ -216,7 +216,7 @@ static void setGraphitePatternsFromConfig(const Context & context,
{
if (startsWith(key, "pattern"))
{
appendGraphitePattern(context, config, config_element + "." + key, params.patterns);
appendGraphitePattern(config, config_element + "." + key, params.patterns);
}
else if (key == "default")
{
@ -234,7 +234,7 @@ static void setGraphitePatternsFromConfig(const Context & context,
}
if (config.has(config_element + ".default"))
appendGraphitePattern(context, config, config_element + "." + ".default", params.patterns);
appendGraphitePattern(config, config_element + "." + ".default", params.patterns);
}

View File

@ -165,12 +165,12 @@ private:
BlockInputStreams StorageFile::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const Names & /*column_names*/,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
QueryProcessingStage::Enum & /*processed_stage*/,
size_t max_block_size,
unsigned num_streams)
unsigned /*num_streams*/)
{
return BlockInputStreams(1, std::make_shared<StorageFileBlockInputStream>(*this, context, max_block_size));
}
@ -228,8 +228,8 @@ private:
};
BlockOutputStreamPtr StorageFile::write(
const ASTPtr & query,
const Settings & settings)
const ASTPtr & /*query*/,
const Settings & /*settings*/)
{
return std::make_shared<StorageFileBlockOutputStream>(*this);
}
@ -241,7 +241,7 @@ void StorageFile::drop()
}
void StorageFile::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageFile::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
if (!is_db_table)
throw Exception("Can't rename table '" + table_name + "' binded to user-defined file (or FD)", ErrorCodes::DATABASE_ACCESS_DENIED);

View File

@ -233,7 +233,7 @@ StorageKafka::StorageKafka(
BlockInputStreams StorageKafka::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -44,7 +44,7 @@ public:
size_t max_block_size,
unsigned num_streams) override;
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override
void rename(const String & /*new_path_to_db*/, const String & new_database_name, const String & new_table_name) override
{
table_name = new_table_name;
database_name = new_database_name;

View File

@ -490,7 +490,7 @@ void StorageLog::loadMarks()
}
void StorageLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageLog::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
std::unique_lock<std::shared_mutex> lock(rwlock);
@ -544,7 +544,7 @@ const StorageLog::Marks & StorageLog::getMarksWithRealRowCount() const
BlockInputStreams StorageLog::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,
@ -588,7 +588,7 @@ BlockInputStreams StorageLog::read(
BlockOutputStreamPtr StorageLog::write(
const ASTPtr & query, const Settings & settings)
const ASTPtr & /*query*/, const Settings & /*settings*/)
{
loadMarks();
return std::make_shared<LogBlockOutputStream>(*this);

View File

@ -88,10 +88,10 @@ StorageMemory::StorageMemory(
BlockInputStreams StorageMemory::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const Context & context,
const SelectQueryInfo & /*query_info*/,
const Context & /*context*/,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,
size_t /*max_block_size*/,
unsigned num_streams)
{
check(column_names);
@ -122,7 +122,7 @@ BlockInputStreams StorageMemory::read(
BlockOutputStreamPtr StorageMemory::write(
const ASTPtr & query, const Settings & settings)
const ASTPtr & /*query*/, const Settings & /*settings*/)
{
return std::make_shared<MemoryBlockOutputStream>(*this);
}

View File

@ -41,7 +41,7 @@ public:
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override;
void drop() override;
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override { name = new_table_name; }
void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }
private:
String name;

View File

@ -39,7 +39,7 @@ public:
unsigned num_streams) override;
void drop() override {}
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override { name = new_table_name; }
void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override { name = new_table_name; }
/// you need to add and remove columns in the sub-tables manually
/// the structure of sub-tables is not checked

View File

@ -112,7 +112,7 @@ BlockInputStreams StorageMergeTree::read(
return reader.read(column_names, query_info, context, processed_stage, max_block_size, num_streams, nullptr, 0);
}
BlockOutputStreamPtr StorageMergeTree::write(const ASTPtr & query, const Settings & settings)
BlockOutputStreamPtr StorageMergeTree::write(const ASTPtr & /*query*/, const Settings & /*settings*/)
{
return std::make_shared<MergeTreeBlockOutputStream>(*this);
}
@ -130,7 +130,7 @@ void StorageMergeTree::drop()
data.dropAllData();
}
void StorageMergeTree::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageMergeTree::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
std::string new_full_path = new_path_to_db + escapeForFileName(new_table_name) + '/';
@ -449,7 +449,7 @@ void StorageMergeTree::clearColumnInPartition(const ASTPtr & partition, const Fi
bool StorageMergeTree::optimize(
const ASTPtr & query, const ASTPtr & partition, bool final, bool deduplicate, const Context & context)
const ASTPtr & /*query*/, const ASTPtr & partition, bool final, bool deduplicate, const Context & context)
{
String partition_id;
if (partition)
@ -458,7 +458,7 @@ bool StorageMergeTree::optimize(
}
void StorageMergeTree::dropPartition(const ASTPtr & query, const ASTPtr & partition, bool detach, const Context & context)
void StorageMergeTree::dropPartition(const ASTPtr & /*query*/, const ASTPtr & partition, bool detach, const Context & context)
{
/// Asks to complete merges and does not allow them to start.
/// This protects against "revival" of data for a removed partition after completion of merge.

View File

@ -23,22 +23,22 @@ public:
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
BlockInputStreams read(
const Names & column_names,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,
unsigned num_streams) override
const Names &,
const SelectQueryInfo &,
const Context &,
QueryProcessingStage::Enum &,
size_t,
unsigned) override
{
return { std::make_shared<NullBlockInputStream>() };
}
BlockOutputStreamPtr write(const ASTPtr & query, const Settings & settings) override
BlockOutputStreamPtr write(const ASTPtr &, const Settings &) override
{
return std::make_shared<NullBlockOutputStream>();
}
void rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name) override
void rename(const String & /*new_path_to_db*/, const String & /*new_database_name*/, const String & new_table_name) override
{
name = new_table_name;
}

View File

@ -2357,7 +2357,7 @@ void StorageReplicatedMergeTree::assertNotReadonly() const
}
BlockOutputStreamPtr StorageReplicatedMergeTree::write(const ASTPtr & query, const Settings & settings)
BlockOutputStreamPtr StorageReplicatedMergeTree::write(const ASTPtr & /*query*/, const Settings & settings)
{
assertNotReadonly();
@ -2422,7 +2422,7 @@ bool StorageReplicatedMergeTree::optimize(const ASTPtr & query, const ASTPtr & p
void StorageReplicatedMergeTree::alter(const AlterCommands & params,
const String & database_name, const String & table_name, const Context & context)
const String & /*database_name*/, const String & /*table_name*/, const Context & context)
{
assertNotReadonly();

View File

@ -66,7 +66,7 @@ void SetOrJoinBlockOutputStream::writeSuffix()
BlockOutputStreamPtr StorageSetOrJoinBase::write(const ASTPtr & query, const Settings & settings)
BlockOutputStreamPtr StorageSetOrJoinBase::write(const ASTPtr & /*query*/, const Settings & /*settings*/)
{
++increment;
return std::make_shared<SetOrJoinBlockOutputStream>(*this, path, path + "tmp/", toString(increment) + ".bin");
@ -157,7 +157,7 @@ void StorageSetOrJoinBase::restoreFromFile(const String & file_path)
}
void StorageSetOrJoinBase::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageSetOrJoinBase::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
/// Rename directory with data.
String new_path = new_path_to_db + escapeForFileName(new_table_name);

View File

@ -43,7 +43,7 @@ namespace ErrorCodes
class StripeLogBlockInputStream final : public IProfilingBlockInputStream
{
public:
StripeLogBlockInputStream(const NameSet & column_names_, StorageStripeLog & storage_, size_t max_read_buffer_size_,
StripeLogBlockInputStream(StorageStripeLog & storage_, size_t max_read_buffer_size_,
std::shared_ptr<const IndexForNativeFormat> & index_,
IndexForNativeFormat::Blocks::const_iterator index_begin_,
IndexForNativeFormat::Blocks::const_iterator index_end_)
@ -200,7 +200,7 @@ StorageStripeLog::StorageStripeLog(
}
void StorageStripeLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageStripeLog::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
std::unique_lock<std::shared_mutex> lock(rwlock);
@ -215,10 +215,10 @@ void StorageStripeLog::rename(const String & new_path_to_db, const String & new_
BlockInputStreams StorageStripeLog::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const size_t /*max_block_size*/,
unsigned num_streams)
{
std::shared_lock<std::shared_mutex> lock(rwlock);
@ -249,7 +249,7 @@ BlockInputStreams StorageStripeLog::read(
std::advance(end, (stream + 1) * size / num_streams);
res.emplace_back(std::make_shared<StripeLogBlockInputStream>(
column_names_set, *this, context.getSettingsRef().max_read_buffer_size, index, begin, end));
*this, context.getSettingsRef().max_read_buffer_size, index, begin, end));
}
/// We do not keep read lock directly at the time of reading, because we read ranges of data that do not change.
@ -259,7 +259,7 @@ BlockInputStreams StorageStripeLog::read(
BlockOutputStreamPtr StorageStripeLog::write(
const ASTPtr & query, const Settings & settings)
const ASTPtr & /*query*/, const Settings & /*settings*/)
{
return std::make_shared<StripeLogBlockOutputStream>(*this);
}

View File

@ -367,7 +367,7 @@ void StorageTinyLog::addFiles(const String & column_name, const IDataType & type
}
void StorageTinyLog::rename(const String & new_path_to_db, const String & new_database_name, const String & new_table_name)
void StorageTinyLog::rename(const String & new_path_to_db, const String & /*new_database_name*/, const String & new_table_name)
{
/// Rename directory with data.
Poco::File(path + escapeForFileName(name)).renameTo(new_path_to_db + escapeForFileName(new_table_name));
@ -383,11 +383,11 @@ void StorageTinyLog::rename(const String & new_path_to_db, const String & new_da
BlockInputStreams StorageTinyLog::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned num_streams)
const unsigned /*num_streams*/)
{
check(column_names);
processed_stage = QueryProcessingStage::FetchColumns;
@ -397,7 +397,7 @@ BlockInputStreams StorageTinyLog::read(
BlockOutputStreamPtr StorageTinyLog::write(
const ASTPtr & query, const Settings & settings)
const ASTPtr & /*query*/, const Settings & /*settings*/)
{
return std::make_shared<TinyLogBlockOutputStream>(*this);
}

View File

@ -36,11 +36,11 @@ StorageView::StorageView(
BlockInputStreams StorageView::read(
const Names & column_names,
const SelectQueryInfo & query_info,
const SelectQueryInfo & /*query_info*/,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned num_streams)
const size_t /*max_block_size*/,
const unsigned /*num_streams*/)
{
processed_stage = QueryProcessingStage::FetchColumns;
return InterpreterSelectQuery(inner_query->clone(), context, column_names).executeWithoutUnion();