#include <DB/DataStreams/narrowBlockInputStreams.h>
#include <DB/Storages/StorageMerge.h>


namespace DB
{
|
/** Construct a Merge storage: a virtual table that reads from every table
  * of `source_database_` whose name matches `table_name_regexp_`.
  * Only stores the configuration; no table lookup happens here.
  */
StorageMerge::StorageMerge(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const String & source_database_,
	const String & table_name_regexp_,
	const Context & context_)
	: name(name_),
	columns(columns_),
	source_database(source_database_),
	table_name_regexp(table_name_regexp_),
	context(context_)
{
}
|
|
|
|
|
|
2013-02-06 11:26:35 +00:00
|
|
|
|
/** Factory method: allocates a StorageMerge and hands ownership to the
  * shared pointer returned by thisPtr(), as required for all storages.
  */
StoragePtr StorageMerge::create(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const String & source_database_,
	const String & table_name_regexp_,
	const Context & context_)
{
	StorageMerge * storage = new StorageMerge(name_, columns_, source_database_, table_name_regexp_, context_);
	return storage->thisPtr();
}
|
|
|
|
|
|
2012-05-30 05:53:09 +00:00
|
|
|
|
|
|
|
|
|
/** Read from all matching tables of the source database and return the union
  * of their input streams.
  *
  * column_names     - columns requested by the query.
  * query            - the query AST, forwarded to each source table.
  * settings         - per-query settings, forwarded to each source table.
  * processed_stage  - out: the minimum processing stage reached among all
  *                    source tables (the merge can only rely on what every
  *                    source guarantees).
  * max_block_size   - recommended block size, forwarded to each source table.
  * threads          - recommended total number of threads; divided between
  *                    the source tables.
  */
BlockInputStreams StorageMerge::read(
	const Names & column_names,
	ASTPtr query,
	const Settings & settings,
	QueryProcessingStage::Enum & processed_stage,
	size_t max_block_size,
	unsigned threads)
{
	BlockInputStreams res;

	typedef std::vector<StoragePtr> SelectedTables;
	SelectedTables selected_tables;

	/// Among all stages up to which the query is processed in the source tables, take the minimum.
	processed_stage = QueryProcessingStage::Complete;
	QueryProcessingStage::Enum tmp_processed_stage = QueryProcessingStage::Complete;

	/// The list of tables may be modified by another thread, so read it under the context lock.
	{
		Poco::ScopedLock<Poco::Mutex> lock(context.getMutex());

		context.assertDatabaseExists(source_database);

		const Tables & tables = context.getDatabases().at(source_database);

		/** First build the list of selected tables to know its size.
		  * It is needed to pass the correct thread-count recommendation to each table.
		  * A Merge table never reads from itself (it->second != this).
		  */
		for (Tables::const_iterator it = tables.begin(); it != tables.end(); ++it)
			if (it->second != this && table_name_regexp.match(it->first))
				selected_tables.push_back(it->second);
	}

	/// Loop-invariant recommendation: split `threads` between the tables, at least one each.
	/// When no table matched, the loop below does not run, so the value is unused (avoid dividing by zero).
	unsigned threads_per_table = selected_tables.empty()
		? 0
		: (selected_tables.size() > threads ? 1 : (threads / selected_tables.size()));

	for (SelectedTables::iterator it = selected_tables.begin(); it != selected_tables.end(); ++it)
	{
		BlockInputStreams source_streams = (*it)->read(
			column_names,
			query,
			settings,
			tmp_processed_stage,
			max_block_size,
			threads_per_table);

		res.insert(res.end(), source_streams.begin(), source_streams.end());

		if (tmp_processed_stage < processed_stage)
			processed_stage = tmp_processed_stage;
	}

	/** If there are too many sources, glue them together into `threads` sources.
	  */
	if (res.size() > threads)
		res = narrowBlockInputStreams(res, threads);

	return res;
}
|
|
|
|
|
|
|
|
|
|
}
|