2017-04-01 09:19:00 +00:00
|
|
|
#include <DataStreams/AddingConstColumnBlockInputStream.h>
|
|
|
|
#include <DataStreams/narrowBlockInputStreams.h>
|
|
|
|
#include <DataStreams/LazyBlockInputStream.h>
|
|
|
|
#include <DataStreams/NullBlockInputStream.h>
|
2018-02-22 23:02:35 +00:00
|
|
|
#include <DataStreams/ConvertingBlockInputStream.h>
|
2018-02-23 01:00:47 +00:00
|
|
|
#include <DataStreams/OneBlockInputStream.h>
|
|
|
|
#include <DataStreams/ConcatBlockInputStream.h>
|
|
|
|
#include <DataStreams/materializeBlock.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Storages/StorageMerge.h>
|
2017-12-30 00:36:06 +00:00
|
|
|
#include <Storages/StorageFactory.h>
|
2017-11-20 05:22:54 +00:00
|
|
|
#include <Storages/VirtualColumnUtils.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Interpreters/InterpreterAlterQuery.h>
|
|
|
|
#include <Interpreters/ExpressionActions.h>
|
2017-12-30 00:36:06 +00:00
|
|
|
#include <Interpreters/evaluateConstantExpression.h>
|
2018-02-23 01:00:47 +00:00
|
|
|
#include <Interpreters/InterpreterSelectQuery.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Parsers/ASTSelectQuery.h>
|
2017-12-30 00:36:06 +00:00
|
|
|
#include <Parsers/ASTLiteral.h>
|
2018-02-22 23:02:35 +00:00
|
|
|
#include <Parsers/ASTIdentifier.h>
|
|
|
|
#include <Parsers/ASTExpressionList.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <DataTypes/DataTypeString.h>
|
|
|
|
#include <Columns/ColumnString.h>
|
2017-07-13 20:58:19 +00:00
|
|
|
#include <Common/typeid_cast.h>
|
2017-04-01 09:19:00 +00:00
|
|
|
#include <Databases/IDatabase.h>
|
2018-09-18 16:06:15 +00:00
|
|
|
#include <Interpreters/SettingsCommon.h>
|
2018-09-18 11:09:21 +00:00
|
|
|
#include <DataStreams/MaterializingBlockInputStream.h>
|
2018-09-19 10:16:30 +00:00
|
|
|
#include <DataStreams/FilterBlockInputStream.h>
|
|
|
|
#include <ext/range.h>
|
2014-12-30 18:04:53 +00:00
|
|
|
|
2012-05-30 05:53:09 +00:00
|
|
|
|
|
|
|
namespace DB
|
|
|
|
{
|
|
|
|
|
2016-01-11 21:46:36 +00:00
|
|
|
namespace ErrorCodes
{
    extern const int ILLEGAL_PREWHERE;
    extern const int INCOMPATIBLE_SOURCE_TABLES;
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
    extern const int NO_SUCH_COLUMN_IN_TABLE;
    /// Declared here for consistency with the other codes: both are referenced
    /// later in this file (read()/getQueryHeader() use LOGICAL_ERROR, alter()
    /// uses NOT_IMPLEMENTED) but were missing from this extern list.
    extern const int LOGICAL_ERROR;
    extern const int NOT_IMPLEMENTED;
}
|
|
|
|
|
|
|
|
|
2014-09-30 03:08:47 +00:00
|
|
|
StorageMerge::StorageMerge(
|
2017-04-01 07:20:54 +00:00
|
|
|
const std::string & name_,
|
2018-03-06 20:18:34 +00:00
|
|
|
const ColumnsDescription & columns_,
|
2017-04-01 07:20:54 +00:00
|
|
|
const String & source_database_,
|
|
|
|
const String & table_name_regexp_,
|
|
|
|
const Context & context_)
|
2018-03-06 20:18:34 +00:00
|
|
|
: IStorage{columns_},
|
2018-01-25 14:42:39 +00:00
|
|
|
name(name_), source_database(source_database_),
|
2017-04-01 07:20:54 +00:00
|
|
|
table_name_regexp(table_name_regexp_), context(context_)
|
2012-05-30 05:53:09 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-06-15 14:07:31 +00:00
|
|
|
|
2018-07-05 19:56:51 +00:00
|
|
|
/// NOTE Structure of underlying tables as well as their set are not constant,
|
|
|
|
/// so the results of these methods may become obsolete after the call.
|
|
|
|
|
2017-12-25 21:57:29 +00:00
|
|
|
/// Resolves a column name against, in order: the Merge table's own virtual
/// column `_table`, the columns declared on the Merge table itself, and the
/// (virtual or real) columns of the first underlying table.
/// Throws NO_SUCH_COLUMN_IN_TABLE when nothing matches.
NameAndTypePair StorageMerge::getColumn(const String & column_name) const
{
    /// Virtual column of the Merge table itself: the name of the source table.
    if (column_name == "_table")
        return { column_name, std::make_shared<DataTypeString>() };

    /// A column declared directly on this storage.
    if (IStorage::hasColumn(column_name))
        return IStorage::getColumn(column_name);

    /// Delegate to an underlying table — this also covers its virtual columns.
    if (auto underlying_table = getFirstTable([](auto &&) { return true; }))
        return underlying_table->getColumn(column_name);

    throw Exception("There is no column " + column_name + " in table.", ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
|
|
|
|
|
2014-07-31 09:11:49 +00:00
|
|
|
bool StorageMerge::hasColumn(const String & column_name) const
|
2014-01-16 14:52:13 +00:00
|
|
|
{
|
2018-07-05 21:26:23 +00:00
|
|
|
if (column_name == "_table")
|
|
|
|
return true;
|
|
|
|
|
2018-07-06 16:22:15 +00:00
|
|
|
if (IStorage::hasColumn(column_name))
|
|
|
|
return true;
|
|
|
|
|
2018-07-05 19:56:51 +00:00
|
|
|
auto first_table = getFirstTable([](auto &&) { return true; });
|
|
|
|
if (first_table)
|
|
|
|
return first_table->hasColumn(column_name);
|
2018-07-05 21:26:23 +00:00
|
|
|
|
2018-07-06 16:22:15 +00:00
|
|
|
return false;
|
2014-01-16 14:52:13 +00:00
|
|
|
}
|
2012-05-30 05:53:09 +00:00
|
|
|
|
2018-02-23 01:00:47 +00:00
|
|
|
|
2018-07-05 19:56:51 +00:00
|
|
|
template <typename F>
|
|
|
|
StoragePtr StorageMerge::getFirstTable(F && predicate) const
|
2017-07-12 18:44:27 +00:00
|
|
|
{
|
2018-02-23 01:00:47 +00:00
|
|
|
auto database = context.getDatabase(source_database);
|
|
|
|
auto iterator = database->getIterator(context);
|
2017-07-12 18:44:27 +00:00
|
|
|
|
2018-02-23 01:00:47 +00:00
|
|
|
while (iterator->isValid())
|
2017-07-12 18:44:27 +00:00
|
|
|
{
|
2018-02-23 01:00:47 +00:00
|
|
|
if (table_name_regexp.match(iterator->name()))
|
2017-07-12 18:44:27 +00:00
|
|
|
{
|
2018-02-23 01:00:47 +00:00
|
|
|
auto & table = iterator->table();
|
2018-07-05 19:56:51 +00:00
|
|
|
if (table.get() != this && predicate(table))
|
|
|
|
return table;
|
2017-07-12 18:44:27 +00:00
|
|
|
}
|
2018-02-23 01:00:47 +00:00
|
|
|
|
|
|
|
iterator->next();
|
2017-07-12 18:44:27 +00:00
|
|
|
}
|
2018-02-23 01:00:47 +00:00
|
|
|
|
2018-07-05 19:56:51 +00:00
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool StorageMerge::isRemote() const
|
|
|
|
{
|
|
|
|
auto first_remote_table = getFirstTable([](const StoragePtr & table) { return table->isRemote(); });
|
|
|
|
return first_remote_table != nullptr;
|
2017-07-12 18:44:27 +00:00
|
|
|
}
|
|
|
|
|
2017-07-21 20:59:01 +00:00
|
|
|
|
2018-03-16 09:00:04 +00:00
|
|
|
bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand) const
|
|
|
|
{
|
|
|
|
/// It's beneficial if it is true for at least one table.
|
|
|
|
StorageListWithLocks selected_tables = getSelectedTables();
|
|
|
|
|
|
|
|
size_t i = 0;
|
|
|
|
for (const auto & table : selected_tables)
|
|
|
|
{
|
|
|
|
if (table.first->mayBenefitFromIndexForIn(left_in_operand))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
++i;
|
|
|
|
/// For simplicity reasons, check only first ten tables.
|
|
|
|
if (i > 10)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-19 14:47:09 +00:00
|
|
|
/// Determines to which stage the query may be processed inside the source tables.
/// When exactly one table is selected, processing can be delegated to it (clamped
/// from below to WithMergeableState); with zero or several tables each source can
/// only produce a mergeable state that Merge must combine.
QueryProcessingStage::Enum StorageMerge::getQueryProcessingStage(const Context & context) const
{
    auto stage_in_source_tables = QueryProcessingStage::FetchColumns;

    DatabasePtr database = context.getDatabase(source_database);
    DatabaseIteratorPtr iterator = database->getIterator(context);

    size_t selected_table_size = 0;

    while (iterator->isValid())
    {
        if (table_name_regexp.match(iterator->name()))
        {
            auto & table = iterator->table();
            if (table.get() != this)
            {
                ++selected_table_size;
                /// Accumulate the maximum over all tables instead of letting the last
                /// matching table overwrite the value. With several tables the result
                /// below does not depend on it, but this keeps the variable meaningful
                /// in every case instead of relying on that coincidence.
                stage_in_source_tables = std::max(stage_in_source_tables, table->getQueryProcessingStage(context));
            }
        }

        iterator->next();
    }

    auto fetch_stage = std::max(stage_in_source_tables, QueryProcessingStage::WithMergeableState);
    return selected_table_size == 1 ? fetch_stage : QueryProcessingStage::WithMergeableState;
}
|
|
|
|
|
|
|
|
|
2012-05-30 05:53:09 +00:00
|
|
|
/// Reads from every selected underlying table and returns the combined set of
/// streams, narrowed back to at most `num_streams`. Tables that received no
/// stream budget are wrapped into LazyBlockInputStream so their reading is
/// deferred until the stream is actually pulled.
BlockInputStreams StorageMerge::read(
    const Names & column_names,
    const SelectQueryInfo & query_info,
    const Context & context,
    QueryProcessingStage::Enum processed_stage,
    const size_t max_block_size,
    const unsigned num_streams)
{
    BlockInputStreams res;

    /// Split requested columns: `_table` is our virtual column and must not be
    /// forwarded to the underlying tables.
    bool has_table_virtual_column = false;
    Names real_column_names;
    real_column_names.reserve(column_names.size());

    for (const auto & name : column_names)
    {
        if (name == "_table")
        {
            has_table_virtual_column = true;
        }
        else
            real_column_names.push_back(name);
    }

    /** Just in case, turn off optimization "transfer to PREWHERE",
      * since there is no certainty that it works when one of table is MergeTree and other is not.
      */
    Context modified_context = context;
    modified_context.getSettingsRef().optimize_move_to_prewhere = false;

    /// What will be result structure depending on query processed stage in source tables?
    Block header = getQueryHeader(column_names, query_info, context, processed_stage);

    /** First we make list of selected tables to find out its size.
      * This is necessary to correctly pass the recommended number of threads to each table.
      */
    StorageListWithLocks selected_tables = getSelectedTables(query_info.query, has_table_virtual_column, true);

    /// No matching table: produce the result over an empty source (storage == nullptr).
    if (selected_tables.empty())
        return createSourceStreams(
            query_info, processed_stage, max_block_size, modified_context, header, {}, {}, real_column_names, 0, has_table_virtual_column);

    size_t remaining_streams = num_streams;
    size_t tables_count = selected_tables.size();

    for (auto it = selected_tables.begin(); it != selected_tables.end(); ++it)
    {
        /// Distribute the stream budget: at least one stream per table while any
        /// remain; tables past the budget get zero and become lazy below.
        size_t current_need_streams = tables_count >= num_streams ? 1 : (num_streams / tables_count);
        size_t current_streams = std::min(current_need_streams, remaining_streams);
        remaining_streams -= current_streams;

        StoragePtr storage = it->first;
        TableStructureReadLockPtr struct_lock = it->second;

        BlockInputStreams source_streams;

        if (current_streams)
        {
            source_streams = createSourceStreams(
                query_info, processed_stage, max_block_size, modified_context, header, storage,
                struct_lock, real_column_names, current_streams, has_table_virtual_column);
        }
        else
        {
            /// Defer reading this table until the stream is pulled. With
            /// current_streams == 0 createSourceStreams concatenates its result
            /// into a single stream, hence the size check below.
            /// NOTE(review): `real_column_names` is captured by reference to a
            /// local of read(); if the lazy stream can outlive this call, the
            /// reference dangles — confirm LazyBlockInputStream's lifetime.
            source_streams.emplace_back(std::make_shared<LazyBlockInputStream>(header, [=, &real_column_names]() -> BlockInputStreamPtr
            {
                BlockInputStreams streams = createSourceStreams(query_info, processed_stage, max_block_size,
                    modified_context, header, storage,
                    struct_lock, real_column_names, current_streams, has_table_virtual_column);

                if (streams.empty())
                    return std::make_shared<NullBlockInputStream>(header);

                if (streams.size() != 1)
                    throw Exception("LogicalError: the lazy stream size must to be one.", ErrorCodes::LOGICAL_ERROR);

                return streams[0];
            }));
        }

        res.insert(res.end(), source_streams.begin(), source_streams.end());
    }

    if (res.empty())
        return res;

    /// Merge may have produced more streams than requested; narrow them back.
    res = narrowBlockInputStreams(res, num_streams);
    return res;
}
|
|
|
|
|
2018-09-18 11:09:21 +00:00
|
|
|
/// Builds the input streams for one underlying table (or, when `storage` is
/// null, for an empty source so the query still yields a correctly-typed
/// result). Streams are converted to the Merge table's header, optionally
/// extended with the `_table` virtual column, and tagged with the structure lock.
BlockInputStreams StorageMerge::createSourceStreams(const SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage,
    const size_t max_block_size, const Context & modified_context,
    const Block & header, const StoragePtr & storage,
    const TableStructureReadLockPtr & struct_lock, Names & real_column_names,
    size_t current_streams, bool has_table_virtual_column)
{
    /// Work on a copy of the query so per-table rewrites don't leak to other tables.
    SelectQueryInfo modified_query_info;
    modified_query_info.query = query_info.query->clone();
    modified_query_info.prewhere_info = query_info.prewhere_info;
    modified_query_info.sets = query_info.sets;

    /// Substitute this table's name for the `_table` virtual column references.
    VirtualColumnUtils::rewriteEntityInAst(modified_query_info.query, "_table", storage ? storage->getTableName() : "");

    /// No table selected: run the query over a single empty block with the right header.
    if (!storage)
        return BlockInputStreams{
            InterpreterSelectQuery(modified_query_info.query, modified_context, std::make_shared<OneBlockInputStream>(header),
                processed_stage, true).execute().in};

    BlockInputStreams source_streams;

    if (processed_stage <= storage->getQueryProcessingStage(modified_context))
    {
        /// If there are only virtual columns in query, you must request at least one other column.
        if (real_column_names.size() ==0)
            real_column_names.push_back(ExpressionActions::getSmallestColumn(storage->getColumns().getAllPhysical()));

        source_streams = storage->read(real_column_names, modified_query_info, modified_context, processed_stage, max_block_size,
            current_streams);
    }
    else if (processed_stage > storage->getQueryProcessingStage(modified_context))
    {
        /// The table cannot process up to the requested stage itself: run a full
        /// SELECT against it, pointed at this concrete table.
        typeid_cast<ASTSelectQuery *>(modified_query_info.query.get())->replaceDatabaseAndTable(source_database, storage->getTableName());

        /// TODO: set num_streams
        InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, Names{}, processed_stage};
        BlockInputStreamPtr interpreter_stream = interpreter.execute().in;

        /** Materialization is needed, since from distributed storage the constants come materialized.
          * If you do not do this, different types (Const and non-Const) columns will be produced in different threads,
          * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same.
          */
        source_streams.emplace_back(std::make_shared<MaterializingBlockInputStream>(interpreter_stream));
    }

    if (!source_streams.empty())
    {
        /// current_streams == 0 means the caller wants exactly one (lazy) stream:
        /// concatenate whatever the table produced into a single stream.
        if (!current_streams)
        {
            BlockInputStreamPtr stream =
                source_streams.size() > 1 ? std::make_shared<ConcatBlockInputStream>(source_streams) : source_streams[0];

            source_streams.resize(1);
            source_streams[0] = stream;
        }

        for (BlockInputStreamPtr & source_stream : source_streams)
        {
            /// Attach the table name as the constant `_table` column if requested.
            if (has_table_virtual_column)
                source_stream = std::make_shared<AddingConstColumnBlockInputStream<String>>(
                    source_stream, std::make_shared<DataTypeString>(), storage->getTableName(), "_table");

            /// Subordinary tables could have different but convertible types, like numeric types of different width.
            /// We must return streams with structure equals to structure of Merge table.
            convertingSourceStream(header, modified_context, storage, modified_query_info, source_stream, processed_stage);

            /// Keep the structure lock alive for as long as the stream is in use.
            source_stream->addTableLock(struct_lock);
        }
    }

    return source_streams;
}
|
|
|
|
|
2017-02-02 15:54:27 +00:00
|
|
|
/// Collects every underlying table matching the regexp (excluding this Merge
/// table itself), taking a structure read lock on each one.
StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables() const
{
    StorageListWithLocks selected_tables;
    auto database = context.getDatabase(source_database);

    for (auto iterator = database->getIterator(context); iterator->isValid(); iterator->next())
    {
        if (!table_name_regexp.match(iterator->name()))
            continue;

        auto & matched_table = iterator->table();
        if (matched_table.get() != this)
            selected_tables.emplace_back(matched_table, matched_table->lockStructure(false, __PRETTY_FUNCTION__));
    }

    return selected_tables;
}
|
2013-09-23 12:01:19 +00:00
|
|
|
|
|
|
|
|
2018-09-18 11:09:21 +00:00
|
|
|
/// Collects underlying tables matching the regexp, optionally locking each one,
/// and — when the query uses the `_table` virtual column — filters the list
/// down to the tables the query's conditions on `_table` can actually select.
/// Throws ILLEGAL_PREWHERE if the query has PREWHERE and a table lacks support.
StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables(const ASTPtr & query, bool has_virtual_column, bool get_lock) const
{
    StorageListWithLocks selected_tables;
    DatabasePtr database = context.getDatabase(source_database);
    DatabaseIteratorPtr iterator = database->getIterator(context);

    /// Hoisted out of the loop (the query doesn't change between iterations) and
    /// guarded: previously the result of typeid_cast was dereferenced without a
    /// null check, crashing on a non-SELECT query.
    const ASTSelectQuery * select_query = query ? typeid_cast<const ASTSelectQuery *>(query.get()) : nullptr;
    const bool query_has_prewhere = select_query && select_query->prewhere_expression;

    auto virtual_column = ColumnString::create();

    while (iterator->isValid())
    {
        if (table_name_regexp.match(iterator->name()))
        {
            StoragePtr storage = iterator->table();

            if (query_has_prewhere && !storage->supportsPrewhere())
                throw Exception("Storage " + storage->getName() + " doesn't support PREWHERE.", ErrorCodes::ILLEGAL_PREWHERE);

            if (storage.get() != this)
            {
                virtual_column->insert(storage->getTableName());
                selected_tables.emplace_back(storage, get_lock ? storage->lockStructure(false, __PRETTY_FUNCTION__) : TableStructureReadLockPtr{});
            }
        }

        iterator->next();
    }

    if (has_virtual_column)
    {
        /// Evaluate the query's conditions over a one-column block of table names
        /// to find out which tables can pass the `_table` filter.
        Block virtual_columns_block = Block{ColumnWithTypeAndName(std::move(virtual_column), std::make_shared<DataTypeString>(), "_table")};
        VirtualColumnUtils::filterBlockWithQuery(query, virtual_columns_block, context);
        auto values = VirtualColumnUtils::extractSingleValueFromBlock<String>(virtual_columns_block, "_table");

        /// Remove unused tables from the list
        selected_tables.remove_if([&] (const auto & elem) { return values.find(elem.first->getTableName()) == values.end(); });
    }

    return selected_tables;
}
|
|
|
|
|
2016-01-28 01:00:27 +00:00
|
|
|
void StorageMerge::alter(const AlterCommands & params, const String & database_name, const String & table_name, const Context & context)
|
2013-09-23 12:01:19 +00:00
|
|
|
{
|
2017-04-01 07:20:54 +00:00
|
|
|
for (const auto & param : params)
|
|
|
|
if (param.type == AlterCommand::MODIFY_PRIMARY_KEY)
|
|
|
|
throw Exception("Storage engine " + getName() + " doesn't support primary key.", ErrorCodes::NOT_IMPLEMENTED);
|
2016-05-05 18:28:46 +00:00
|
|
|
|
2017-09-01 15:05:23 +00:00
|
|
|
auto lock = lockStructureForAlter(__PRETTY_FUNCTION__);
|
2016-05-13 21:08:19 +00:00
|
|
|
|
2018-03-13 14:18:11 +00:00
|
|
|
ColumnsDescription new_columns = getColumns();
|
|
|
|
params.apply(new_columns);
|
|
|
|
context.getDatabase(database_name)->alterTable(context, table_name, new_columns, {});
|
|
|
|
setColumns(new_columns);
|
2013-09-23 12:01:19 +00:00
|
|
|
}
|
2014-07-11 08:12:03 +00:00
|
|
|
|
2018-09-18 11:09:21 +00:00
|
|
|
/// Computes the header (result structure) of the query for the given processed
/// stage: for FetchColumns it is the requested columns after applying PREWHERE
/// actions; for later stages it is the sample block of the interpreted query.
Block StorageMerge::getQueryHeader(
    const Names & column_names, const SelectQueryInfo & query_info, const Context & context, QueryProcessingStage::Enum processed_stage)
{
    switch (processed_stage)
    {
        case QueryProcessingStage::FetchColumns:
        {
            Block header = getSampleBlockForColumns(column_names);
            if (query_info.prewhere_info)
            {
                query_info.prewhere_info->prewhere_actions->execute(header);
                /// PREWHERE actions may leave constant columns; the pipeline expects
                /// all columns materialized.
                header = materializeBlock(header);
                if (query_info.prewhere_info->remove_prewhere_column)
                    header.erase(query_info.prewhere_info->prewhere_column_name);
            }
            return header;
        }
        case QueryProcessingStage::WithMergeableState:
        case QueryProcessingStage::Complete:
            /// Run the interpreter over an empty source just to learn the result structure.
            return materializeBlock(InterpreterSelectQuery(
                query_info.query, context, std::make_shared<OneBlockInputStream>(getSampleBlockForColumns(column_names)),
                processed_stage, true).getSampleBlock());
    }
    /// Message typos fixed ("Logical Error" / "unknow").
    throw Exception("Logical error: unknown processed stage.", ErrorCodes::LOGICAL_ERROR);
}
|
|
|
|
|
2018-09-19 10:16:30 +00:00
|
|
|
/// Converts a source stream to the Merge table's header. If the conversion
/// actually changed some column types, a WHERE condition that was already
/// applied inside the source table (for stages past FetchColumns) may have
/// been evaluated on differently-typed data, so the filter is re-applied here.
void StorageMerge::convertingSourceStream(const Block & header, const Context & context, const StoragePtr & storage,
    const SelectQueryInfo & query_info, BlockInputStreamPtr & source_stream,
    QueryProcessingStage::Enum processed_stage)
{
    Block before_convert_header = source_stream->getHeader();
    source_stream = std::make_shared<ConvertingBlockInputStream>(context, source_stream, header, ConvertingBlockInputStream::MatchColumnsMode::Name);
    Block after_convert_header = source_stream->getHeader();

    /// Detect whether the conversion changed the type of any column.
    bool different_header = false;

    for (size_t column_index : ext::range(0, before_convert_header.columns()))
    {
        ColumnWithTypeAndName before_column = before_convert_header.getByPosition(column_index);
        if (after_convert_header.has(before_column.name))
        {
            ColumnWithTypeAndName after_column = after_convert_header.getByName(before_column.name);
            if (before_column.type != after_column.type)
            {
                different_header = true;
                break;
            }
        }
    }

    if (!different_header)
        return;

    /// HACK: Re-run filter stream for align processed_stage
    if (processed_stage > QueryProcessingStage::FetchColumns)
    {
        /// NOTE(review): the cast result is dereferenced unchecked — assumes
        /// query_info.query is always an ASTSelectQuery here; confirm callers.
        ASTSelectQuery * select_query = typeid_cast<ASTSelectQuery *>(query_info.query.get());
        if (select_query->where_expression)
        {
            /// Build WHERE actions over the result header plus the `_table`
            /// virtual column, since the condition may reference it.
            Block expression_input = header.cloneEmpty();
            NameAndTypePair name_and_type = getColumn("_table");
            expression_input.insert({ name_and_type.type->createColumn(), name_and_type.type, name_and_type.name });
            ExpressionAnalyzer analyzer = ExpressionAnalyzer{select_query->where_expression, context, {}, expression_input.getNamesAndTypesList()};

            ExpressionActionsPtr where_actions = analyzer.getActions(false, false);

            /// Temporarily add `_table` so the filter can be evaluated, if the
            /// stream doesn't already carry it.
            if (!after_convert_header.has("_table"))
                source_stream = std::make_shared<AddingConstColumnBlockInputStream<String>>(
                    source_stream, std::make_shared<DataTypeString>(), storage->getTableName(), "_table");

            source_stream = std::make_shared<FilterBlockInputStream>(
                source_stream, where_actions, select_query->where_expression->getColumnName(), true);

            /// Strip the temporarily added `_table` column back off when the
            /// result header should not contain it.
            if (!after_convert_header.has("_table") && !header.has("_table"))
                source_stream = std::make_shared<ConvertingBlockInputStream>(context, source_stream, header, ConvertingBlockInputStream::MatchColumnsMode::Name);
        }
    }
}
|
|
|
|
|
2017-12-30 00:36:06 +00:00
|
|
|
|
|
|
|
/// Registers the Merge engine with the storage factory.
void registerStorageMerge(StorageFactory & factory)
{
    factory.registerStorage("Merge", [](const StorageFactory::Arguments & args)
    {
        /// The engine takes two arguments: the database that contains the source
        /// tables, and a regular expression selecting table names within it.
        ASTs & engine_args = args.engine_args;

        if (engine_args.size() != 2)
            throw Exception("Storage Merge requires exactly 2 parameters"
                " - name of source database and regexp for table names.",
                ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

        /// The database argument may be a bare identifier; the regexp must be a constant.
        engine_args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[0], args.local_context);
        engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.local_context);

        String database_name = static_cast<const ASTLiteral &>(*engine_args[0]).value.safeGet<String>();
        String table_regexp = static_cast<const ASTLiteral &>(*engine_args[1]).value.safeGet<String>();

        return StorageMerge::create(args.table_name, args.columns, database_name, table_regexp, args.context);
    });
}
|
|
|
|
|
2013-09-23 12:01:19 +00:00
|
|
|
}
|