Get rid of ASTSet, because it is not really an AST and it led to bugs [#CLICKHOUSE-3148].

This commit is contained in:
Alexey Milovidov 2017-07-15 06:48:36 +03:00
parent 200a1e27e3
commit 9406b6a5aa
98 changed files with 289 additions and 332 deletions

View File

@ -158,7 +158,7 @@ bool filterBlockWithQuery(const ASTPtr & query, Block & block, const Context & c
if (!expression_ast)
return false;
/// Let's parse and calculate the expression.
/// Let's analyze and calculate the expression.
ExpressionAnalyzer analyzer(expression_ast, context, {}, block.getColumnsList());
ExpressionActionsPtr actions = analyzer.getActions(false);
actions->execute(block);

View File

@ -102,7 +102,7 @@ void RemoteBlockInputStream::sendExternalTables()
{
StoragePtr cur = table.second;
QueryProcessingStage::Enum stage = QueryProcessingStage::Complete;
DB::BlockInputStreams input = cur->read(cur->getColumnNamesList(), ASTPtr(), context,
BlockInputStreams input = cur->read(cur->getColumnNamesList(), {}, context,
stage, DEFAULT_BLOCK_SIZE, 1);
if (input.size() == 0)
res.push_back(std::make_pair(std::make_shared<OneBlockInputStream>(cur->getSampleBlock()), table.first));

View File

@ -50,7 +50,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in;
in = table->read(column_names, 0, context, stage, 8192, 1)[0];
in = table->read(column_names, {}, context, stage, 8192, 1)[0];
in = std::make_shared<ExpressionBlockInputStream>(in, expression);
in = std::make_shared<LimitBlockInputStream>(in, 10, std::max(static_cast<Int64>(0), static_cast<Int64>(n) - 10));

View File

@ -55,7 +55,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, context, stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, context, stage, 8192, 1)[0];
in = std::make_shared<FilterBlockInputStream>(in, expression, 1);
in = std::make_shared<LimitBlockInputStream>(in, 10, std::max(static_cast<Int64>(0), static_cast<Int64>(n) - 10));

View File

@ -128,7 +128,7 @@ int main(int argc, char ** argv)
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, context, stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, context, stage, 8192, 1)[0];
in = std::make_shared<FilterBlockInputStream>(in, expression, 4);
//in = std::make_shared<LimitBlockInputStream>(in, 10, 0);

View File

@ -72,7 +72,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, context, stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, context, stage, 8192, 1)[0];
ForkBlockInputStreams fork(in);

View File

@ -103,7 +103,7 @@ try
if (argc == 2 && 0 == strcmp(argv[1], "read"))
{
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, Context::createGlobal(), stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, Context::createGlobal(), stage, 8192, 1)[0];
WriteBufferFromFileDescriptor out1(STDOUT_FILENO);
CompressedWriteBuffer out2(out1);
NativeBlockOutputStream out3(out2, ClickHouseRevision::get());

View File

@ -145,7 +145,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, Context::createGlobal(), stage, argc == 2 ? atoi(argv[1]) : 65536, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, Context::createGlobal(), stage, argc == 2 ? atoi(argv[1]) : 65536, 1)[0];
in = std::make_shared<PartialSortingBlockInputStream>(in, sort_columns);
in = std::make_shared<MergeSortingBlockInputStream>(in, sort_columns, DEFAULT_BLOCK_SIZE, 0, 0, "");
//in = std::make_shared<LimitBlockInputStream>(in, 10, 0);

View File

@ -35,9 +35,9 @@ void test1()
QueryProcessingStage::Enum stage3;
BlockInputStreams streams;
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage1, 1, 1)[0], 30, 30000));
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage2, 1, 1)[0], 30, 2000));
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage3, 1, 1)[0], 30, 100));
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage1, 1, 1)[0], 30, 30000));
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage2, 1, 1)[0], 30, 2000));
streams.emplace_back(std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage3, 1, 1)[0], 30, 100));
UnionBlockInputStream<> union_stream(streams, nullptr, 2);
@ -84,15 +84,15 @@ void test2()
BlockInputStreams streams;
BlockInputStreamPtr stream1 = std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage1, 1, 1)[0], 30, 30000);
BlockInputStreamPtr stream1 = std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage1, 1, 1)[0], 30, 30000);
stream1 = std::make_shared<BlockExtraInfoInputStream>(stream1, extra_info1);
streams.emplace_back(stream1);
BlockInputStreamPtr stream2 = std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage2, 1, 1)[0], 30, 2000);
BlockInputStreamPtr stream2 = std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage2, 1, 1)[0], 30, 2000);
stream2 = std::make_shared<BlockExtraInfoInputStream>(stream2, extra_info2);
streams.emplace_back(stream2);
BlockInputStreamPtr stream3 = std::make_shared<LimitBlockInputStream>(table->read(column_names, 0, context, stage3, 1, 1)[0], 30, 100);
BlockInputStreamPtr stream3 = std::make_shared<LimitBlockInputStream>(table->read(column_names, {}, context, stage3, 1, 1)[0], 30, 100);
stream3 = std::make_shared<BlockExtraInfoInputStream>(stream3, extra_info3);
streams.emplace_back(stream3);

View File

@ -35,7 +35,7 @@ try
StoragePtr table = context.getTable("default", "hits6");
QueryProcessingStage::Enum stage;
BlockInputStreams streams = table->read(column_names, nullptr, context, stage, settings.max_block_size, settings.max_threads);
BlockInputStreams streams = table->read(column_names, {}, context, stage, settings.max_block_size, settings.max_threads);
for (size_t i = 0, size = streams.size(); i < size; ++i)
streams[i] = std::make_shared<AsynchronousBlockInputStream>(streams[i]);

View File

@ -2,6 +2,7 @@
#include <Columns/ColumnsNumber.h>
#include <Functions/IFunction.h>
#include <Common/Stopwatch.h>
#include <Common/typeid_cast.h>
#include <IO/WriteHelpers.h>
#include <iomanip>

View File

@ -623,8 +623,118 @@ void ExpressionAnalyzer::findExternalTables(ASTPtr & ast)
}
static std::pair<String, String> getDatabaseAndTableNameFromIdentifier(const ASTIdentifier & identifier)
{
std::pair<String, String> res;
res.second = identifier.name;
if (!identifier.children.empty())
{
if (identifier.children.size() != 2)
throw Exception("Qualified table name could have only two components", ErrorCodes::LOGICAL_ERROR);
res.first = typeid_cast<const ASTIdentifier &>(*identifier.children[0]).name;
res.second = typeid_cast<const ASTIdentifier &>(*identifier.children[1]).name;
}
return res;
}
static std::shared_ptr<InterpreterSelectQuery> interpretSubquery(
ASTPtr & subquery_or_table_name, const Context & context, size_t subquery_depth, const Names & required_columns);
const ASTPtr & subquery_or_table_name, const Context & context, size_t subquery_depth, const Names & required_columns)
{
/// Subquery or table name. The name of the table is similar to the subquery `SELECT * FROM t`.
const ASTSubquery * subquery = typeid_cast<const ASTSubquery *>(subquery_or_table_name.get());
const ASTIdentifier * table = typeid_cast<const ASTIdentifier *>(subquery_or_table_name.get());
if (!subquery && !table)
throw Exception("IN/JOIN supports only SELECT subqueries.", ErrorCodes::BAD_ARGUMENTS);
/** The subquery in the IN / JOIN section does not have any restrictions on the maximum size of the result.
* Because the result of this query is not the result of the entire query.
* Constraints work instead
* max_rows_in_set, max_bytes_in_set, set_overflow_mode,
* max_rows_in_join, max_bytes_in_join, join_overflow_mode,
* which are checked separately (in the Set, Join objects).
*/
Context subquery_context = context;
Settings subquery_settings = context.getSettings();
subquery_settings.limits.max_result_rows = 0;
subquery_settings.limits.max_result_bytes = 0;
/// The calculation of `extremes` does not make sense and is not necessary (if you do it, then the `extremes` of the subquery can be taken instead of the whole query).
subquery_settings.extremes = 0;
subquery_context.setSettings(subquery_settings);
ASTPtr query;
if (table)
{
/// create ASTSelectQuery for "SELECT * FROM table" as if written by hand
const auto select_query = std::make_shared<ASTSelectQuery>();
query = select_query;
const auto select_expression_list = std::make_shared<ASTExpressionList>();
select_query->select_expression_list = select_expression_list;
select_query->children.emplace_back(select_query->select_expression_list);
/// get columns list for target table
auto database_table = getDatabaseAndTableNameFromIdentifier(*table);
const auto & storage = context.getTable(database_table.first, database_table.second);
const auto & columns = storage->getColumnsListNonMaterialized();
select_expression_list->children.reserve(columns.size());
/// manually substitute column names in place of asterisk
for (const auto & column : columns)
select_expression_list->children.emplace_back(std::make_shared<ASTIdentifier>(
StringRange{}, column.name));
select_query->replaceDatabaseAndTable(database_table.first, database_table.second);
}
else
{
query = subquery->children.at(0);
/** Columns with the same name can be specified in a subquery. For example, SELECT x, x FROM t
* This is bad, because the result of such a query can not be saved to the table, because the table can not have the same name columns.
* Saving to the table is required for GLOBAL subqueries.
*
* To avoid this situation, we will rename the same columns.
*/
std::set<std::string> all_column_names;
std::set<std::string> assigned_column_names;
if (ASTSelectQuery * select = typeid_cast<ASTSelectQuery *>(query.get()))
{
for (auto & expr : select->select_expression_list->children)
all_column_names.insert(expr->getAliasOrColumnName());
for (auto & expr : select->select_expression_list->children)
{
auto name = expr->getAliasOrColumnName();
if (!assigned_column_names.insert(name).second)
{
size_t i = 1;
while (all_column_names.end() != all_column_names.find(name + "_" + toString(i)))
++i;
name = name + "_" + toString(i);
expr = expr->clone(); /// Cancels fuse of the same expressions in the tree.
expr->setAlias(name);
all_column_names.insert(name);
assigned_column_names.insert(name);
}
}
}
}
if (required_columns.empty())
return std::make_shared<InterpreterSelectQuery>(
query, subquery_context, QueryProcessingStage::Complete, subquery_depth + 1);
else
return std::make_shared<InterpreterSelectQuery>(
query, subquery_context, required_columns, QueryProcessingStage::Complete, subquery_depth + 1);
}
void ExpressionAnalyzer::addExternalStorage(ASTPtr & subquery_or_table_name_or_table_expression)
@ -1380,7 +1490,7 @@ void ExpressionAnalyzer::makeSetsForIndex()
makeSetsForIndexImpl(ast, storage->getSampleBlock());
}
void ExpressionAnalyzer::makeSetsForIndexImpl(ASTPtr & node, const Block & sample_block)
void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node, const Block & sample_block)
{
for (auto & child : node->children)
makeSetsForIndexImpl(child, sample_block);
@ -1409,120 +1519,6 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(ASTPtr & node, const Block & sampl
}
static std::pair<String, String> getDatabaseAndTableNameFromIdentifier(const ASTIdentifier & identifier)
{
std::pair<String, String> res;
res.second = identifier.name;
if (!identifier.children.empty())
{
if (identifier.children.size() != 2)
throw Exception("Qualified table name could have only two components", ErrorCodes::LOGICAL_ERROR);
res.first = typeid_cast<const ASTIdentifier &>(*identifier.children[0]).name;
res.second = typeid_cast<const ASTIdentifier &>(*identifier.children[1]).name;
}
return res;
}
static std::shared_ptr<InterpreterSelectQuery> interpretSubquery(
ASTPtr & subquery_or_table_name, const Context & context, size_t subquery_depth, const Names & required_columns)
{
/// Subquery or table name. The name of the table is similar to the subquery `SELECT * FROM t`.
const ASTSubquery * subquery = typeid_cast<const ASTSubquery *>(subquery_or_table_name.get());
const ASTIdentifier * table = typeid_cast<const ASTIdentifier *>(subquery_or_table_name.get());
if (!subquery && !table)
throw Exception("IN/JOIN supports only SELECT subqueries.", ErrorCodes::BAD_ARGUMENTS);
/** The subquery in the IN / JOIN section does not have any restrictions on the maximum size of the result.
* Because the result of this query is not the result of the entire query.
* Constraints work instead
* max_rows_in_set, max_bytes_in_set, set_overflow_mode,
* max_rows_in_join, max_bytes_in_join, join_overflow_mode,
* which are checked separately (in the Set, Join objects).
*/
Context subquery_context = context;
Settings subquery_settings = context.getSettings();
subquery_settings.limits.max_result_rows = 0;
subquery_settings.limits.max_result_bytes = 0;
/// The calculation of `extremes` does not make sense and is not necessary (if you do it, then the `extremes` of the subquery can be taken instead of the whole query).
subquery_settings.extremes = 0;
subquery_context.setSettings(subquery_settings);
ASTPtr query;
if (table)
{
/// create ASTSelectQuery for "SELECT * FROM table" as if written by hand
const auto select_query = std::make_shared<ASTSelectQuery>();
query = select_query;
const auto select_expression_list = std::make_shared<ASTExpressionList>();
select_query->select_expression_list = select_expression_list;
select_query->children.emplace_back(select_query->select_expression_list);
/// get columns list for target table
auto database_table = getDatabaseAndTableNameFromIdentifier(*table);
const auto & storage = context.getTable(database_table.first, database_table.second);
const auto & columns = storage->getColumnsListNonMaterialized();
select_expression_list->children.reserve(columns.size());
/// manually substitute column names in place of asterisk
for (const auto & column : columns)
select_expression_list->children.emplace_back(std::make_shared<ASTIdentifier>(
StringRange{}, column.name));
select_query->replaceDatabaseAndTable(database_table.first, database_table.second);
}
else
{
query = subquery->children.at(0);
/** Columns with the same name can be specified in a subquery. For example, SELECT x, x FROM t
* This is bad, because the result of such a query can not be saved to the table, because the table can not have the same name columns.
* Saving to the table is required for GLOBAL subqueries.
*
* To avoid this situation, we will rename the same columns.
*/
std::set<std::string> all_column_names;
std::set<std::string> assigned_column_names;
if (ASTSelectQuery * select = typeid_cast<ASTSelectQuery *>(query.get()))
{
for (auto & expr : select->select_expression_list->children)
all_column_names.insert(expr->getAliasOrColumnName());
for (auto & expr : select->select_expression_list->children)
{
auto name = expr->getAliasOrColumnName();
if (!assigned_column_names.insert(name).second)
{
size_t i = 1;
while (all_column_names.end() != all_column_names.find(name + "_" + toString(i)))
++i;
name = name + "_" + toString(i);
expr = expr->clone(); /// Cancels fuse of the same expressions in the tree.
expr->setAlias(name);
all_column_names.insert(name);
assigned_column_names.insert(name);
}
}
}
}
if (required_columns.empty())
return std::make_shared<InterpreterSelectQuery>(
query, subquery_context, QueryProcessingStage::Complete, subquery_depth + 1);
else
return std::make_shared<InterpreterSelectQuery>(
query, subquery_context, required_columns, QueryProcessingStage::Complete, subquery_depth + 1);
}
void ExpressionAnalyzer::makeSet(const ASTFunction * node, const Block & sample_block)
{
/** You need to convert the right argument to a set.

View File

@ -13,16 +13,16 @@ class Context;
class ExpressionActions;
struct ExpressionActionsChain;
class Set;
using SetPtr = std::shared_ptr<Set>;
using PreparedSets = std::unordered_map<IAST*, SetPtr>;
class Join;
using JoinPtr = std::shared_ptr<Join>;
class IAST;
using ASTPtr = std::shared_ptr<IAST>;
class Set;
using SetPtr = std::shared_ptr<Set>;
using PreparedSets = std::unordered_map<IAST*, SetPtr>;
class IBlockInputStream;
using BlockInputStreamPtr = std::shared_ptr<IBlockInputStream>;
@ -247,7 +247,6 @@ private:
void optimizeIfWithConstantConditionImpl(ASTPtr & current_ast, Aliases & aliases) const;
bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & value) const;
/// Transform the value enumeration or subquery into ASTSet. `node` - `in` or `notIn` function.
void makeSet(const ASTFunction * node, const Block & sample_block);
/// Adds a list of ALIAS columns from the table

View File

@ -836,13 +836,17 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns()
if (max_streams > 1 && !is_remote)
max_streams *= settings.max_streams_to_max_threads_ratio;
SelectQueryInfo query_info;
query_info.query = query_ptr;
query_info.sets = query_analyzer->getPreparedSets();
/// PREWHERE optimization
{
auto optimize_prewhere = [&](auto & merge_tree)
{
/// Try transferring some condition from WHERE to PREWHERE if enabled and viable
if (settings.optimize_move_to_prewhere && query.where_expression && !query.prewhere_expression && !query.final())
MergeTreeWhereOptimizer{query_ptr, context, merge_tree.getData(), required_columns, log};
MergeTreeWhereOptimizer{query_info, context, merge_tree.getData(), required_columns, log};
};
if (const StorageMergeTree * merge_tree = typeid_cast<const StorageMergeTree *>(storage.get()))
@ -851,7 +855,7 @@ QueryProcessingStage::Enum InterpreterSelectQuery::executeFetchColumns()
optimize_prewhere(*merge_tree);
}
streams = storage->read(required_columns, query_ptr, context, from_stage, max_block_size, max_streams);
streams = storage->read(required_columns, query_info, context, from_stage, max_block_size, max_streams);
if (alias_actions)
{

View File

@ -164,7 +164,4 @@ using SetPtr = std::shared_ptr<Set>;
using ConstSetPtr = std::shared_ptr<const Set>;
using Sets = std::vector<SetPtr>;
/// Information about calculated sets in right hand side of IN.
using PreparedSets = std::unordered_map<IAST*, SetPtr>;
}

View File

@ -10,6 +10,7 @@
#include <Storages/IStorage.h>
#include <Databases/IDatabase.h>
#include <Databases/DatabaseOrdinary.h>
#include <Common/typeid_cast.h>
#include <iostream>
#include <vector>

View File

@ -1,40 +0,0 @@
#pragma once
#include <Parsers/IAST.h>
namespace DB
{
class Set;
/** The set. During the calculation, the expression in the IN section is replaced by the set
* - a subquery or an explicit enumeration of values.
* TODO: This is bad practice, lead to bugs. Remove it.
*/
class ASTSet : public IAST
{
public:
std::shared_ptr<Set> set;
String column_name;
bool is_explicit = false;
ASTSet(const String & column_name_) : column_name(column_name_) {}
ASTSet(const StringRange range_, const String & column_name_) : IAST(range_), column_name(column_name_) {}
String getID() const override { return "Set_" + getColumnName(); }
ASTPtr clone() const override { return std::make_shared<ASTSet>(*this); }
String getColumnName() const override { return column_name; }
protected:
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
{
/** Prepared set. In user requests, this does not happen, but this happens after the intermediate query transformation.
* Output it for not real (this will not be a valid query, but it will show that there was a set).
*/
settings.ostr << (settings.hilite ? hilite_keyword : "")
<< "(...)"
<< (settings.hilite ? hilite_none : "");
}
};
}

View File

@ -2,19 +2,28 @@
#include <stdlib.h>
#include <fcntl.h>
#include <signal.h>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <experimental/optional>
#include <unordered_set>
#include <algorithm>
#include <experimental/optional>
#include <boost/program_options.hpp>
#include <Poco/File.h>
#include <Poco/Util/Application.h>
#include <Common/ClickHouseRevision.h>
#include <Common/Stopwatch.h>
#include <Common/Exception.h>
#include <Common/ShellCommand.h>
#include <Common/ExternalTable.h>
#include <Common/UnicodeBar.h>
#include <Common/formatReadable.h>
#include <Common/NetException.h>
#include <common/readline_use.h>
#include <Common/typeid_cast.h>
#include <Core/Types.h>
#include <Core/QueryProcessingStage.h>
#include <IO/ReadBufferFromFileDescriptor.h>
@ -25,7 +34,6 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <DataStreams/AsynchronousBlockInputStream.h>
#include <DataStreams/TabSeparatedRowInputStream.h>
#include <Parsers/ParserQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTUseQuery.h>
@ -39,13 +47,6 @@
#include <Interpreters/Context.h>
#include <Client/Connection.h>
#include "InterruptListener.h"
#include <Common/ExternalTable.h>
#include <Common/UnicodeBar.h>
#include <Common/formatReadable.h>
#include <Columns/ColumnString.h>
#include <Common/NetException.h>
#include <common/readline_use.h>
#include <Common/typeid_cast.h>
#include <Functions/registerFunctions.h>
#include <AggregateFunctions/registerAggregateFunctions.h>

View File

@ -4,6 +4,7 @@
#include <Common/Exception.h>
#include <Core/QueryProcessingStage.h>
#include <Storages/ITableDeclaration.h>
#include <Storages/SelectQueryInfo.h>
#include <Poco/RWLock.h>
#include <memory>
#include <experimental/optional>
@ -30,10 +31,6 @@ class IStorage;
using StoragePtr = std::shared_ptr<IStorage>;
class IAST;
using ASTPtr = std::shared_ptr<IAST>;
struct Settings;
class AlterCommands;
@ -156,7 +153,7 @@ public:
* (indexes, locks, etc.)
* Returns a stream with which you can read data sequentially
* or multiple streams for parallel data reading.
* The into `processed_stage` info is also written to what stage the request was processed.
* The `processed_stage` info is also written to what stage the request was processed.
* (Normally, the function only reads the columns from the list, but in other cases,
* for example, the request can be partially processed on a remote server.)
*
@ -171,7 +168,7 @@ public:
*/
virtual BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -129,7 +129,7 @@ static RelativeSize convertAbsoluteSampleSizeToRelative(const ASTPtr & node, siz
BlockInputStreams MergeTreeDataSelectExecutor::read(
const Names & column_names_to_return,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -188,7 +188,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::read(
/// If `_part` virtual column is requested, we try to use it as an index.
Block virtual_columns_block = getBlockWithPartColumn(parts);
if (part_column_queried)
VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, virtual_columns_block, context);
std::multiset<String> part_values = VirtualColumnUtils::extractSingleValueFromBlock<String>(virtual_columns_block, "_part");
@ -199,9 +199,9 @@ BlockInputStreams MergeTreeDataSelectExecutor::read(
SortDescription sort_descr = data.getSortDescription();
ColumnsWithTypeAndName date_columns = {{DataTypeDate{}.createColumn(), std::make_shared<DataTypeDate>(), data.date_column_name}};
PKCondition key_condition(query, context, available_real_and_virtual_columns, sort_descr,
PKCondition key_condition(query_info, context, available_real_and_virtual_columns, sort_descr,
data.getPrimaryExpression());
PKCondition date_condition(query, context, available_real_and_virtual_columns,
PKCondition date_condition(query_info, context, available_real_and_virtual_columns,
SortDescription(1, SortColumnDescription(data.date_column_name, 1, 1)),
std::make_shared<ExpressionActions>(date_columns, settings));
@ -254,7 +254,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::read(
RelativeSize relative_sample_size = 0;
RelativeSize relative_sample_offset = 0;
ASTSelectQuery & select = *typeid_cast<ASTSelectQuery*>(&*query);
ASTSelectQuery & select = typeid_cast<ASTSelectQuery &>(*query_info.query);
auto select_sample_size = select.sample_size();
auto select_sample_offset = select.sample_offset();

View File

@ -22,7 +22,7 @@ public:
*/
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -34,8 +34,7 @@ static constexpr auto global_not_in_function_name = "globalNotIn";
MergeTreeWhereOptimizer::MergeTreeWhereOptimizer(
ASTPtr & query,
const PreparedSets & prepared_sets,
SelectQueryInfo & query_info,
const Context & context,
const MergeTreeData & data,
const Names & column_names,
@ -44,12 +43,12 @@ MergeTreeWhereOptimizer::MergeTreeWhereOptimizer(
[] (const SortColumnDescription & col) { return col.column_name; })},
table_columns{ext::map<std::unordered_set>(data.getColumnsList(),
[] (const NameAndTypePair & col) { return col.name; })},
block_with_constants{PKCondition::getBlockWithConstants(query, context, data.getColumnsList())},
prepared_sets(prepared_sets),
block_with_constants{PKCondition::getBlockWithConstants(query_info.query, context, data.getColumnsList())},
prepared_sets(query_info.sets),
log{log}
{
calculateColumnSizes(data, column_names);
auto & select = typeid_cast<ASTSelectQuery &>(*query);
auto & select = typeid_cast<ASTSelectQuery &>(*query_info.query);
determineArrayJoinedNames(select);
optimize(select);
}

View File

@ -5,7 +5,7 @@
#include <set>
#include <boost/noncopyable.hpp>
#include <Core/Block.h>
#include <Interpreters/Set.h>
#include <Storages/SelectQueryInfo.h>
namespace Poco { class Logger; }
@ -13,9 +13,6 @@ namespace Poco { class Logger; }
namespace DB
{
class IAST;
using ASTPtr = std::shared_ptr<IAST>;
class ASTSelectQuery;
class ASTFunction;
class MergeTreeData;
@ -36,8 +33,7 @@ class MergeTreeWhereOptimizer : private boost::noncopyable
{
public:
MergeTreeWhereOptimizer(
ASTPtr & query,
const PreparedSets & prepared_sets,
SelectQueryInfo & query_info,
const Context & context,
const MergeTreeData & data,
const Names & column_names,

View File

@ -218,9 +218,13 @@ Block PKCondition::getBlockWithConstants(
}
PKCondition::PKCondition(const ASTPtr & query, const Context & context, const NamesAndTypesList & all_columns,
const SortDescription & sort_descr_, ExpressionActionsPtr pk_expr_)
: sort_descr(sort_descr_), pk_expr(pk_expr_)
PKCondition::PKCondition(
const SelectQueryInfo & query_info,
const Context & context,
const NamesAndTypesList & all_columns,
const SortDescription & sort_descr_,
ExpressionActionsPtr pk_expr_)
: sort_descr(sort_descr_), pk_expr(pk_expr_), prepared_sets(query_info.sets)
{
for (size_t i = 0; i < sort_descr.size(); ++i)
{
@ -232,10 +236,10 @@ PKCondition::PKCondition(const ASTPtr & query, const Context & context, const Na
/** Evaluation of expressions that depend only on constants.
* For the index to be used, if it is written, for example `WHERE Date = toDate(now())`.
*/
Block block_with_constants = getBlockWithConstants(query, context, all_columns);
Block block_with_constants = getBlockWithConstants(query_info.query, context, all_columns);
/// Transform WHERE section to Reverse Polish notation
const ASTSelectQuery & select = typeid_cast<const ASTSelectQuery &>(*query);
const ASTSelectQuery & select = typeid_cast<const ASTSelectQuery &>(*query_info.query);
if (select.where_expression)
{
traverseAST(select.where_expression, context, block_with_constants);
@ -544,7 +548,7 @@ bool PKCondition::atomFromAST(const ASTPtr & node, const Context & context, Bloc
key_arg_pos = 1;
is_constant_transformed = true;
}
else if (typeid_cast<const ASTSet *>(args[1].get())
else if (prepared_sets.count(args[1].get())
&& isPrimaryKeyPossiblyWrappedByMonotonicFunctions(args[0], context, key_column_num, key_expr_type, chain))
{
key_arg_pos = 0;
@ -898,10 +902,10 @@ bool PKCondition::mayBeTrueInRangeImpl(const std::vector<Range> & key_ranges, co
{
auto in_func = typeid_cast<const ASTFunction *>(element.in_function.get());
const ASTs & args = typeid_cast<const ASTExpressionList &>(*in_func->arguments).children;
auto ast_set = typeid_cast<const ASTSet *>(args[1].get());
if (in_func && ast_set)
PreparedSets::const_iterator it = prepared_sets.find(args[1].get());
if (in_func && it != prepared_sets.end())
{
rpn_stack.push_back(ast_set->set->mayBeTrueInRange(*key_range));
rpn_stack.push_back(it->second->mayBeTrueInRange(*key_range));
if (element.function == RPNElement::FUNCTION_NOT_IN_SET)
rpn_stack.back() = !rpn_stack.back();
}
@ -961,14 +965,6 @@ bool PKCondition::mayBeTrueAfter(
}
static const ASTSet & inFunctionToSet(const ASTPtr & in_function)
{
const auto & in_func = typeid_cast<const ASTFunction &>(*in_function);
const auto & args = typeid_cast<const ASTExpressionList &>(*in_func.arguments).children;
const auto & ast_set = typeid_cast<const ASTSet &>(*args[1]);
return ast_set;
}
String PKCondition::RPNElement::toString() const
{
auto print_wrapped_column = [this](std::ostringstream & ss)
@ -998,7 +994,7 @@ String PKCondition::RPNElement::toString() const
{
ss << "(";
print_wrapped_column(ss);
ss << (function == FUNCTION_IN_SET ? " in " : " notIn ") << inFunctionToSet(in_function).set->describe();
ss << (function == FUNCTION_IN_SET ? " in set" : " notIn set");
ss << ")";
return ss.str();
}

View File

@ -10,6 +10,7 @@
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Storages/SelectQueryInfo.h>
namespace DB
@ -202,8 +203,7 @@ class PKCondition
public:
/// Does not include the SAMPLE section. all_columns - the set of all columns of the table.
PKCondition(
const ASTPtr & query,
const PreparedSets & prepared_sets,
const SelectQueryInfo & query_info,
const Context & context,
const NamesAndTypesList & all_columns,
const SortDescription & sort_descr,
@ -330,6 +330,7 @@ private:
SortDescription sort_descr;
ColumnIndices pk_columns;
ExpressionActionsPtr pk_expr;
PreparedSets prepared_sets;
};
}

View File

@ -114,7 +114,7 @@ private:
BlockInputStreams StorageBuffer::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,
@ -131,7 +131,7 @@ BlockInputStreams StorageBuffer::read(
if (destination.get() == this)
throw Exception("Destination table is myself. Read will cause infinite loop.", ErrorCodes::INFINITE_LOOP);
streams_from_dst = destination->read(column_names, query, context, processed_stage, max_block_size, num_streams);
streams_from_dst = destination->read(column_names, query_info, context, processed_stage, max_block_size, num_streams);
}
BlockInputStreams streams_from_buffers;
@ -144,7 +144,7 @@ BlockInputStreams StorageBuffer::read(
*/
if (processed_stage > QueryProcessingStage::FetchColumns)
for (auto & stream : streams_from_buffers)
stream = InterpreterSelectQuery(query, context, processed_stage, 0, stream).execute().in;
stream = InterpreterSelectQuery(query_info.query, context, processed_stage, 0, stream).execute().in;
streams_from_dst.insert(streams_from_dst.end(), streams_from_buffers.begin(), streams_from_buffers.end());
return streams_from_dst;

View File

@ -58,7 +58,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -35,7 +35,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -68,7 +68,7 @@ StorageDictionary::StorageDictionary(
BlockInputStreams StorageDictionary::read(
const Names & column_names,
const ASTPtr& query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -41,7 +41,7 @@ public:
std::string getTableName() const override { return table_name; }
const NamesAndTypesList & getColumnsListImpl() const override { return *columns; }
BlockInputStreams read(const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size = DEFAULT_BLOCK_SIZE,

View File

@ -189,7 +189,7 @@ StoragePtr StorageDistributed::createWithOwnCluster(
BlockInputStreams StorageDistributed::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -206,7 +206,7 @@ BlockInputStreams StorageDistributed::read(
: QueryProcessingStage::WithMergeableState;
const auto & modified_query_ast = rewriteSelectQuery(
query, remote_database, remote_table);
query_info.query, remote_database, remote_table);
Tables external_tables;
@ -218,7 +218,6 @@ BlockInputStreams StorageDistributed::read(
/** The functionality of shard_multiplexing is not completed - turn it off.
* (Because connecting to different shards within a single thread is not done in parallel.)
* For more information, see https: //███████████.yandex-team.ru/METR-18300
*/
//bool enable_shard_multiplexing = !(ast.order_expression_list && !ast.group_expression_list);
bool enable_shard_multiplexing = false;
@ -354,7 +353,6 @@ void StorageDistributed::reshardPartitions(
/** The functionality of shard_multiplexing is not completed - turn it off.
* (Because connecting to different shards within a single thread is not done in parallel.)
* For more information, see https: //███████████.yandex-team.ru/METR-18300
*/
bool enable_shard_multiplexing = false;
@ -433,7 +431,6 @@ BlockInputStreams StorageDistributed::describe(const Context & context, const Se
/** The functionality of shard_multiplexing is not completed - turn it off.
* (Because connecting connections to different shards within a single thread is not done in parallel.)
* For more information, see https://███████████.yandex-team.ru/METR-18300
*/
bool enable_shard_multiplexing = false;

View File

@ -57,7 +57,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -155,7 +155,7 @@ private:
BlockInputStreams StorageFile::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -74,7 +74,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -775,7 +775,7 @@ const Marks & StorageLog::getMarksWithRealRowCount() const
BlockInputStreams StorageLog::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -52,7 +52,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -112,13 +112,13 @@ bool StorageMaterializedView::hasColumn(const String & column_name) const
BlockInputStreams StorageMaterializedView::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
return getInnerTable()->read(column_names, query, context, processed_stage, max_block_size, num_streams);
return getInnerTable()->read(column_names, query_info, context, processed_stage, max_block_size, num_streams);
}
BlockOutputStreamPtr StorageMaterializedView::write(const ASTPtr & query, const Settings & settings)

View File

@ -43,7 +43,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -96,7 +96,7 @@ StorageMemory::StorageMemory(
BlockInputStreams StorageMemory::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -36,7 +36,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -109,7 +109,7 @@ static Names collectIdentifiersInFirstLevelOfSelectQuery(ASTPtr ast)
BlockInputStreams StorageMerge::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -131,6 +131,8 @@ BlockInputStreams StorageMerge::read(
*/
StorageListWithLocks selected_tables = getSelectedTables();
const ASTPtr & query = query_info.query;
/// If PREWHERE is used in query, you need to make sure that all tables support this.
if (typeid_cast<const ASTSelectQuery &>(*query).prewhere_expression)
for (const auto & elem : selected_tables)
@ -178,7 +180,7 @@ BlockInputStreams StorageMerge::read(
QueryProcessingStage::Enum processed_stage_in_source_table = processed_stage;
source_streams = table->read(
real_column_names,
modified_query_ast,
{ modified_query_ast, query_info.sets },
modified_context,
processed_stage_in_source_table,
max_block_size,
@ -206,7 +208,7 @@ BlockInputStreams StorageMerge::read(
QueryProcessingStage::Enum processed_stage_in_source_table = processed_stage;
BlockInputStreams streams = table->read(
real_column_names,
modified_query_ast,
{ modified_query_ast, query_info.sets },
modified_context,
processed_stage_in_source_table,
max_block_size,

View File

@ -34,7 +34,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -101,13 +101,13 @@ StorageMergeTree::~StorageMergeTree()
BlockInputStreams StorageMergeTree::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
const unsigned num_streams)
{
return reader.read(column_names, query, context, processed_stage, max_block_size, num_streams, nullptr, 0);
return reader.read(column_names, query_info, context, processed_stage, max_block_size, num_streams, nullptr, 0);
}
BlockOutputStreamPtr StorageMergeTree::write(const ASTPtr & query, const Settings & settings)

View File

@ -51,7 +51,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -26,7 +26,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -2288,7 +2288,7 @@ StorageReplicatedMergeTree::~StorageReplicatedMergeTree()
BlockInputStreams StorageReplicatedMergeTree::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -2340,7 +2340,7 @@ BlockInputStreams StorageReplicatedMergeTree::read(
}
return reader.read(
column_names, query, context, processed_stage, max_block_size, num_streams, &part_index, max_block_number_to_read);
column_names, query_info, context, processed_stage, max_block_size, num_streams, &part_index, max_block_number_to_read);
}

View File

@ -123,7 +123,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -222,7 +222,7 @@ void StorageStripeLog::rename(const String & new_path_to_db, const String & new_
BlockInputStreams StorageStripeLog::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -31,7 +31,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -527,7 +527,7 @@ void StorageTinyLog::rename(const String & new_path_to_db, const String & new_da
BlockInputStreams StorageTinyLog::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -31,7 +31,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -111,7 +111,7 @@ private:
BlockInputStreams StorageTrivialBuffer::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,
@ -130,7 +130,7 @@ BlockInputStreams StorageTrivialBuffer::read(
throw Exception("Destination table is myself. Read will cause infinite loop.",
ErrorCodes::INFINITE_LOOP);
streams = destination->read(column_names, query, context,
streams = destination->read(column_names, query_info, context,
processed_stage, max_block_size, num_streams);
}
@ -156,12 +156,13 @@ BlockInputStreams StorageTrivialBuffer::read(
*/
if (processed_stage > QueryProcessingStage::FetchColumns)
for (auto & stream : streams_from_buffers)
stream = InterpreterSelectQuery(query, context, processed_stage, 0, stream).execute().in;
stream = InterpreterSelectQuery(query_info.query, context, processed_stage, 0, stream).execute().in;
streams.insert(streams.end(), streams_from_buffers.begin(), streams_from_buffers.end());
return streams;
}
template <typename DeduplicationController>
void StorageTrivialBuffer::addBlock(const Block & block, DeduplicationController & deduplication_controller)
{
@ -186,6 +187,7 @@ void StorageTrivialBuffer::addBlock(const Block & block, DeduplicationController
}
}
void StorageTrivialBuffer::flush(bool check_thresholds, bool is_called_from_background)
{
Block block_to_write;
@ -273,6 +275,7 @@ void StorageTrivialBuffer::flush(bool check_thresholds, bool is_called_from_back
}
class TrivialBufferBlockOutputStream : public IBlockOutputStream
{
public:
@ -334,11 +337,13 @@ private:
StorageTrivialBuffer & buffer;
};
BlockOutputStreamPtr StorageTrivialBuffer::write(const ASTPtr & query, const Settings & settings)
{
return std::make_shared<TrivialBufferBlockOutputStream>(*this);
}
void StorageTrivialBuffer::startup()
{
flush_thread = std::thread(&StorageTrivialBuffer::flushThread, this);
@ -363,6 +368,7 @@ void StorageTrivialBuffer::shutdown()
}
/** NOTE If you do OPTIMIZE after insertion,
* it does not guarantee that all data will be in destination table at the time of
* next SELECT just after OPTIMIZE.
@ -393,7 +399,6 @@ bool StorageTrivialBuffer::optimize(const ASTPtr & query, const String & partiti
}
bool StorageTrivialBuffer::checkThresholds(
const time_t current_time, const size_t additional_rows, const size_t additional_bytes) const
{
@ -408,6 +413,7 @@ bool StorageTrivialBuffer::checkThresholds(
}
bool StorageTrivialBuffer::checkThresholdsImpl(const size_t rows, const size_t bytes,
const time_t time_passed) const
{
@ -438,6 +444,7 @@ bool StorageTrivialBuffer::checkThresholdsImpl(const size_t rows, const size_t b
return false;
}
void StorageTrivialBuffer::flushThread()
{
setThreadName("BufferFlush");
@ -456,6 +463,7 @@ void StorageTrivialBuffer::flushThread()
while (!shutdown_event.tryWait(1000));
}
void StorageTrivialBuffer::writeBlockToDestination(const Block & block, StoragePtr table)
{
if (no_destination || !block)
@ -518,6 +526,7 @@ void StorageTrivialBuffer::writeBlockToDestination(const Block & block, StorageP
block_io.out->writeSuffix();
}
void StorageTrivialBuffer::alter(
const AlterCommands & params, const String & database_name,
const String & table_name, const Context & context)

View File

@ -52,7 +52,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -77,7 +77,7 @@ void StorageView::extractDependentTable(const ASTSelectQuery & query)
BlockInputStreams StorageView::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -27,7 +27,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -26,7 +26,7 @@ StorageSystemAsynchronousMetrics::StorageSystemAsynchronousMetrics(const std::st
BlockInputStreams StorageSystemAsynchronousMetrics::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -25,7 +25,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -23,7 +23,7 @@ StorageSystemBuildOptions::StorageSystemBuildOptions(const std::string & name_)
BlockInputStreams StorageSystemBuildOptions::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -24,7 +24,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -31,7 +31,7 @@ StorageSystemClusters::StorageSystemClusters(const std::string & name_)
BlockInputStreams StorageSystemClusters::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -26,7 +26,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -34,7 +34,7 @@ StorageSystemColumns::StorageSystemColumns(const std::string & name_)
BlockInputStreams StorageSystemColumns::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -57,7 +57,7 @@ BlockInputStreams StorageSystemColumns::read(
block.insert(ColumnWithTypeAndName(database_column, std::make_shared<DataTypeString>(), "database"));
/// Filter block with `database` column.
VirtualColumnUtils::filterBlockWithQuery(query, block, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, block, context);
if (!block.rows())
return BlockInputStreams();
@ -95,7 +95,7 @@ BlockInputStreams StorageSystemColumns::read(
}
/// Filter block with `database` and `table` columns.
VirtualColumnUtils::filterBlockWithQuery(query, block, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, block, context);
if (!block.rows())
return BlockInputStreams();

View File

@ -21,7 +21,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -23,7 +23,7 @@ StorageSystemDatabases::StorageSystemDatabases(const std::string & name_)
BlockInputStreams StorageSystemDatabases::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -42,7 +42,7 @@ StorageSystemDictionaries::StorageSystemDictionaries(const std::string & name)
BlockInputStreams StorageSystemDictionaries::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -19,7 +19,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -23,7 +23,7 @@ StorageSystemEvents::StorageSystemEvents(const std::string & name_)
BlockInputStreams StorageSystemEvents::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -25,7 +25,7 @@ StorageSystemFunctions::StorageSystemFunctions(const std::string & name_)
BlockInputStreams StorageSystemFunctions::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -127,7 +127,7 @@ StorageSystemGraphite::StorageSystemGraphite(const std::string & name_)
BlockInputStreams StorageSystemGraphite::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -17,7 +17,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -37,7 +37,7 @@ StorageSystemMerges::StorageSystemMerges(const std::string & name)
BlockInputStreams StorageSystemMerges::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -20,7 +20,7 @@ public:
const NamesAndTypesList & getColumnsListImpl() const override { return columns; }
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -24,7 +24,7 @@ StorageSystemMetrics::StorageSystemMetrics(const std::string & name_)
BlockInputStreams StorageSystemMetrics::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -57,7 +57,7 @@ StorageSystemNumbers::StorageSystemNumbers(const std::string & name_, bool multi
BlockInputStreams StorageSystemNumbers::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -30,7 +30,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -18,7 +18,7 @@ StorageSystemOne::StorageSystemOne(const std::string & name_)
BlockInputStreams StorageSystemOne::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -26,7 +26,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -47,7 +47,7 @@ StorageSystemParts::StorageSystemParts(const std::string & name_)
BlockInputStreams StorageSystemParts::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -73,7 +73,7 @@ BlockInputStreams StorageSystemParts::read(
block_to_filter.insert(ColumnWithTypeAndName(database_column, std::make_shared<DataTypeString>(), "database"));
/// Filter block_to_filter with column 'database'.
VirtualColumnUtils::filterBlockWithQuery(query, block_to_filter, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);
if (!block_to_filter.rows())
return BlockInputStreams();
@ -129,7 +129,7 @@ BlockInputStreams StorageSystemParts::read(
}
/// Filter block_to_filter with columns 'database', 'table', 'engine', 'active'.
VirtualColumnUtils::filterBlockWithQuery(query, block_to_filter, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context);
/// If all was filtered out.
if (!block_to_filter.rows())

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -55,7 +55,7 @@ StorageSystemProcesses::StorageSystemProcesses(const std::string & name_)
BlockInputStreams StorageSystemProcesses::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -50,7 +50,7 @@ StorageSystemReplicas::StorageSystemReplicas(const std::string & name_)
BlockInputStreams StorageSystemReplicas::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -98,7 +98,7 @@ BlockInputStreams StorageSystemReplicas::read(
{
Block filtered_block { col_database, col_table, col_engine };
VirtualColumnUtils::filterBlockWithQuery(query, filtered_block, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context);
if (!filtered_block.rows())
return BlockInputStreams();

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -49,7 +49,7 @@ StorageSystemReplicationQueue::StorageSystemReplicationQueue(const std::string &
BlockInputStreams StorageSystemReplicationQueue::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -80,7 +80,7 @@ BlockInputStreams StorageSystemReplicationQueue::read(
{
Block filtered_block { col_database_to_filter, col_table_to_filter };
VirtualColumnUtils::filterBlockWithQuery(query, filtered_block, context);
VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context);
if (!filtered_block.rows())
return BlockInputStreams();

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -24,7 +24,7 @@ StorageSystemSettings::StorageSystemSettings(const std::string & name_)
BlockInputStreams StorageSystemSettings::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -52,7 +52,7 @@ static ColumnWithTypeAndName getFilteredDatabases(const ASTPtr & query, const Co
BlockInputStreams StorageSystemTables::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -87,7 +87,7 @@ BlockInputStreams StorageSystemTables::read(
col_meta_mod_time.column = std::make_shared<ColumnUInt32>();
block.insert(col_meta_mod_time);
ColumnWithTypeAndName filtered_databases_column = getFilteredDatabases(query, context);
ColumnWithTypeAndName filtered_databases_column = getFilteredDatabases(query_info.query, context);
for (size_t row_number = 0; row_number < filtered_databases_column.column->size(); ++row_number)
{

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -105,7 +105,7 @@ static String extractPath(const ASTPtr & query)
BlockInputStreams StorageSystemZooKeeper::read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
const size_t max_block_size,
@ -114,7 +114,7 @@ BlockInputStreams StorageSystemZooKeeper::read(
check(column_names);
processed_stage = QueryProcessingStage::FetchColumns;
String path = extractPath(query);
String path = extractPath(query_info.query);
if (path.empty())
throw Exception("SELECT from system.zookeeper table must contain condition like path = 'path' in WHERE clause.");

View File

@ -23,7 +23,7 @@ public:
BlockInputStreams read(
const Names & column_names,
const ASTPtr & query,
const SelectQueryInfo & query_info,
const Context & context,
QueryProcessingStage::Enum & processed_stage,
size_t max_block_size,

View File

@ -132,7 +132,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, Context::createGlobal(), stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, Context::createGlobal(), stage, 8192, 1)[0];
RowOutputStreamPtr out_ = std::make_shared<TabSeparatedRowOutputStream>(out_buf, sample);
BlockOutputStreamFromRowOutputStream out(out_);
copyData(*in, out);

View File

@ -9,6 +9,7 @@
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
#include <Interpreters/Context.h>
#include <Common/typeid_cast.h>
int main(int argc, char ** argv)
@ -68,7 +69,7 @@ try
QueryProcessingStage::Enum stage;
BlockInputStreamPtr in = table->read(column_names, 0, Context::createGlobal(), stage, 8192, 1)[0];
BlockInputStreamPtr in = table->read(column_names, {}, Context::createGlobal(), stage, 8192, 1)[0];
Block sample;
{

View File

@ -29,7 +29,7 @@ try
QueryProcessingStage::Enum stage;
LimitBlockInputStream input(table->read(column_names, 0, Context::createGlobal(), stage, 10, 1)[0], 10, 96);
LimitBlockInputStream input(table->read(column_names, {}, Context::createGlobal(), stage, 10, 1)[0], 10, 96);
RowOutputStreamPtr output_ = std::make_shared<TabSeparatedRowOutputStream>(out_buf, sample);
BlockOutputStreamFromRowOutputStream output(output_);