#include <DB/DataStreams/RemoteBlockInputStream.h>
#include <DB/DataStreams/MaterializingBlockInputStream.h>
#include <DB/DataStreams/BlockExtraInfoInputStream.h>

#include <DB/Storages/StorageDistributed.h>
#include <DB/Storages/VirtualColumnFactory.h>
#include <DB/Storages/Distributed/DistributedBlockOutputStream.h>
#include <DB/Storages/Distributed/DirectoryMonitor.h>
#include <DB/Common/escapeForFileName.h>

#include <DB/Parsers/ASTInsertQuery.h>
#include <DB/Parsers/ASTSelectQuery.h>
#include <DB/Parsers/ASTIdentifier.h>
#include <DB/Parsers/TablePropertiesQueriesASTs.h>

#include <DB/Interpreters/InterpreterSelectQuery.h>
#include <DB/Interpreters/InterpreterAlterQuery.h>
#include <DB/Interpreters/InterpreterDescribeQuery.h>
#include <DB/Interpreters/ExpressionAnalyzer.h>

#include <DB/Core/Field.h>

#include <memory>

namespace DB
{

namespace ErrorCodes
{
	extern const int STORAGE_REQUIRES_PARAMETER;
}


namespace
{
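
/** The two helpers below clone the incoming query and retarget it at the remote table.
  * Illustrative example (table names are made up): a query addressed to the Distributed table, e.g.
  *     SELECT count() FROM local_db.hits_all
  * is forwarded to each shard as
  *     SELECT count() FROM remote_database.remote_table
  * with the rest of the query left untouched.
  */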

/// select query has database and table names as AST pointers
/// Creates a copy of the query, changing the database and table names.
inline ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, const std::string & table)
{
	auto modified_query_ast = query->clone();

	auto & actual_query = typeid_cast<ASTSelectQuery &>(*modified_query_ast);
	actual_query.database = new ASTIdentifier{{}, database, ASTIdentifier::Database};
	actual_query.table = new ASTIdentifier{{}, table, ASTIdentifier::Table};

	return modified_query_ast;
}

/// insert query has database and table names as bare strings
/// Creates a copy of the query, changing the database and table names.
inline ASTPtr rewriteInsertQuery(const ASTPtr & query, const std::string & database, const std::string & table)
{
	auto modified_query_ast = query->clone();

	auto & actual_query = typeid_cast<ASTInsertQuery &>(*modified_query_ast);
	actual_query.database = database;
	actual_query.table = table;
	/// make sure query is not INSERT SELECT
	actual_query.select = nullptr;

	return modified_query_ast;
}
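
/// Collects per-host metadata (host name, resolved address, port, user) for a cluster address;
/// describe() attaches it to result blocks via BlockExtraInfoInputStream.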
BlockExtraInfo toBlockExtraInfo(const Cluster::Address & address)
{
	BlockExtraInfo block_extra_info;
	block_extra_info.host = address.host_name;
	block_extra_info.resolved_address = address.resolved_address.toString();
	block_extra_info.port = address.port;
	block_extra_info.user = address.user;
	block_extra_info.is_valid = true;
	return block_extra_info;
}

}


StorageDistributed::StorageDistributed(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const String & remote_database_,
	const String & remote_table_,
	Cluster & cluster_,
	Context & context_,
	const ASTPtr & sharding_key_,
	const String & data_path_)
	: name(name_), columns(columns_),
	remote_database(remote_database_), remote_table(remote_table_),
	context(context_), cluster(cluster_),
	sharding_key_expr(sharding_key_ ? ExpressionAnalyzer(sharding_key_, context, nullptr, *columns).getActions(false) : nullptr),
	sharding_key_column_name(sharding_key_ ? sharding_key_->getColumnName() : String{}),
	write_enabled(!data_path_.empty() && (((cluster.getLocalShardCount() + cluster.getRemoteShardCount()) < 2) || sharding_key_)),
	path(data_path_.empty() ? "" : (data_path_ + escapeForFileName(name) + '/'))
{
	createDirectoryMonitors();
}


StorageDistributed::StorageDistributed(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const NamesAndTypesList & materialized_columns_,
	const NamesAndTypesList & alias_columns_,
	const ColumnDefaults & column_defaults_,
	const String & remote_database_,
	const String & remote_table_,
	Cluster & cluster_,
	Context & context_,
	const ASTPtr & sharding_key_,
	const String & data_path_)
	: IStorage{materialized_columns_, alias_columns_, column_defaults_},
	name(name_), columns(columns_),
	remote_database(remote_database_), remote_table(remote_table_),
	context(context_), cluster(cluster_),
	sharding_key_expr(sharding_key_ ? ExpressionAnalyzer(sharding_key_, context, nullptr, *columns).getActions(false) : nullptr),
	sharding_key_column_name(sharding_key_ ? sharding_key_->getColumnName() : String{}),
	write_enabled(!data_path_.empty() && (((cluster.getLocalShardCount() + cluster.getRemoteShardCount()) < 2) || sharding_key_)),
	path(data_path_.empty() ? "" : (data_path_ + escapeForFileName(name) + '/'))
{
	createDirectoryMonitors();
}


StoragePtr StorageDistributed::create(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const NamesAndTypesList & materialized_columns_,
	const NamesAndTypesList & alias_columns_,
	const ColumnDefaults & column_defaults_,
	const String & remote_database_,
	const String & remote_table_,
	const String & cluster_name,
	Context & context_,
	const ASTPtr & sharding_key_,
	const String & data_path_)
{
	context_.initClusters();

	return (new StorageDistributed{
		name_, columns_,
		materialized_columns_, alias_columns_, column_defaults_,
		remote_database_, remote_table_,
		context_.getCluster(cluster_name), context_,
		sharding_key_, data_path_
	})->thisPtr();
}


StoragePtr StorageDistributed::create(
	const std::string & name_,
	NamesAndTypesListPtr columns_,
	const String & remote_database_,
	const String & remote_table_,
	SharedPtr<Cluster> & owned_cluster_,
	Context & context_)
{
	auto res = new StorageDistributed{
		name_, columns_, remote_database_,
		remote_table_, *owned_cluster_, context_
	};

	/// Take ownership of the cluster object.
	res->owned_cluster = owned_cluster_;

	return res->thisPtr();
}


BlockInputStreams StorageDistributed::read(
	const Names & column_names,
	ASTPtr query,
	const Context & context,
	const Settings & settings,
	QueryProcessingStage::Enum & processed_stage,
	const size_t max_block_size,
	const unsigned threads)
{
	Settings new_settings = settings;
	new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.limits.max_execution_time);
	/// Makes no sense on remote servers, because the query is usually sent there under a different user.
	new_settings.max_concurrent_queries_for_user = 0;

	size_t result_size = (cluster.getRemoteShardCount() * settings.max_parallel_replicas) + cluster.getLocalShardCount();

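	/// If there is only one source to read from (or the user asked not to merge distributed GROUP BY results),
	/// each server can run the query to completion; otherwise remote servers return partially aggregated
	/// ("mergeable") state that the initiating server merges.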
	processed_stage = result_size == 1 || settings.distributed_group_by_no_merge
		? QueryProcessingStage::Complete
		: QueryProcessingStage::WithMergeableState;

	BlockInputStreams res;
	const auto & modified_query_ast = rewriteSelectQuery(
		query, remote_database, remote_table);
	const auto & modified_query = queryToString(modified_query_ast);

	/// Network traffic limit, if needed.
	ThrottlerPtr throttler;
	if (settings.limits.max_network_bandwidth || settings.limits.max_network_bytes)
		throttler.reset(new Throttler(
			settings.limits.max_network_bandwidth,
			settings.limits.max_network_bytes,
			"Limit for bytes to send or receive over network exceeded."));

	Tables external_tables;

	if (settings.global_subqueries_method == GlobalSubqueriesMethod::PUSH)
		external_tables = context.getExternalTables();

	/// Distribute the shards evenly among the threads.

	size_t remote_count = cluster.getRemoteShardCount();

	/// Disable shard multiplexing if there is an ORDER BY without a GROUP BY.
	//const ASTSelectQuery & ast = *(static_cast<const ASTSelectQuery *>(modified_query_ast.get()));

	/** The shard_multiplexing functionality is unfinished, so we disable it.
	  * (Because connections to different shards within a single thread are not established in parallel.)
	  * See https://███████████.yandex-team.ru/METR-18300 for details.
	  */
	//bool enable_shard_multiplexing = !(ast.order_expression_list && !ast.group_expression_list);
	bool enable_shard_multiplexing = false;

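	/// Each thread handles a group of remote shards: the connection pools are split as evenly as possible,
	/// every thread gets pools_per_thread pools and the first `remainder` threads get one extra.
	/// With multiplexing disabled above, this degenerates to one stream per remote shard.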
	size_t thread_count;

	if (!enable_shard_multiplexing)
		thread_count = remote_count;
	else if (remote_count == 0)
		thread_count = 0;
	else if (settings.max_distributed_processing_threads == 0)
		thread_count = 1;
	else
		thread_count = std::min(remote_count, static_cast<size_t>(settings.max_distributed_processing_threads));

	size_t pools_per_thread = (thread_count > 0) ? (remote_count / thread_count) : 0;
	size_t remainder = (thread_count > 0) ? (remote_count % thread_count) : 0;

	ConnectionPoolsPtr pools;
	bool do_init = true;

	/// Loop over the shards.
	size_t current_thread = 0;
	for (const auto & shard_info : cluster.getShardsInfo())
	{
		if (shard_info.isLocal())
		{
			/// Add queries to the local ClickHouse.

			DB::Context new_context = context;
			new_context.setSettings(new_settings);

			for (size_t i = 0; i < shard_info.local_addresses.size(); ++i)
			{
				InterpreterSelectQuery interpreter(modified_query_ast, new_context, processed_stage);

				/** Materialization is needed, because constants arrive from remote servers in materialized form.
				  * Otherwise, different streams would end up with different column types (Const and non-Const),
				  * which is not allowed, because all the code assumes that every block in a stream has the same types.
				  */
				res.emplace_back(new MaterializingBlockInputStream(interpreter.execute().in));
			}
		}
		else
		{
			size_t excess = (current_thread < remainder) ? 1 : 0;
			size_t actual_pools_per_thread = pools_per_thread + excess;

			if (actual_pools_per_thread == 1)
			{
				res.emplace_back(new RemoteBlockInputStream{
					shard_info.pool, modified_query, &new_settings, throttler,
					external_tables, processed_stage, context});
				++current_thread;
			}
			else
			{
				if (do_init)
				{
					pools = new ConnectionPools;
					do_init = false;
				}

				pools->push_back(shard_info.pool);
				if (pools->size() == actual_pools_per_thread)
				{
					res.emplace_back(new RemoteBlockInputStream{
						pools, modified_query, &new_settings, throttler,
						external_tables, processed_stage, context});
					do_init = true;
					++current_thread;
				}
			}
		}
	}

	return res;
}
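

/// INSERT is only supported when the data can be routed: the table needs a data path and either
/// at most one shard in the cluster or a sharding key (see write_enabled in the constructors).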
BlockOutputStreamPtr StorageDistributed::write(ASTPtr query, const Settings & settings)
{
	if (!write_enabled)
		throw Exception{
			"Method write is not supported by storage " + getName() +
			" with more than one shard and no sharding key provided",
			ErrorCodes::STORAGE_REQUIRES_PARAMETER
		};

	return new DistributedBlockOutputStream{
		*this,
		rewriteInsertQuery(query, remote_database, remote_table)
	};
}


void StorageDistributed::alter(const AlterCommands & params, const String & database_name, const String & table_name, Context & context)
{
	auto lock = lockStructureForAlter();
	params.apply(*columns, materialized_columns, alias_columns, column_defaults);
	InterpreterAlterQuery::updateMetadata(database_name, table_name,
		*columns, materialized_columns, alias_columns, column_defaults, context);
}


void StorageDistributed::shutdown()
{
	directory_monitors.clear();
}
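

/// Runs a DESCRIBE TABLE query for the remote table on every shard: local replicas are interpreted
/// in-process, remote shards over their connection pools (broadcast to all replicas), and each
/// result block is tagged with information about the host it came from.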
BlockInputStreams StorageDistributed::describe(const Context & context, const Settings & settings)
{
	Settings new_settings = settings;
	new_settings.queue_max_wait_ms = Cluster::saturate(new_settings.queue_max_wait_ms, settings.limits.max_execution_time);
	/// Makes no sense on remote servers, because the query is usually sent there under a different user.
	new_settings.max_concurrent_queries_for_user = 0;

	/// Create a DESCRIBE TABLE query.

	auto describe_query = new ASTDescribeQuery;
	describe_query->database = remote_database;
	describe_query->table = remote_table;

	ASTPtr ast = describe_query;
	const auto query = queryToString(ast);

	/// Network traffic limit, if needed.
	ThrottlerPtr throttler;
	if (settings.limits.max_network_bandwidth || settings.limits.max_network_bytes)
		throttler.reset(new Throttler(
			settings.limits.max_network_bandwidth,
			settings.limits.max_network_bytes,
			"Limit for bytes to send or receive over network exceeded."));

	BlockInputStreams res;

	/// Distribute the shards evenly among the threads.

	size_t remote_count = 0;
	for (const auto & shard_info : cluster.getShardsInfo())
	{
		if (shard_info.hasRemoteConnections())
			++remote_count;
	}

	size_t thread_count;

	/** The shard_multiplexing functionality is unfinished, so we disable it.
	  * (Because connections to different shards within a single thread are not established in parallel.)
	  * See https://███████████.yandex-team.ru/METR-18300 for details.
	  */

	/* if (remote_count == 0)
		thread_count = 0;
	else if (settings.max_distributed_processing_threads == 0)
		thread_count = 1;
	else
		thread_count = std::min(remote_count, static_cast<size_t>(settings.max_distributed_processing_threads));
	*/
	thread_count = remote_count;

	size_t pools_per_thread = (thread_count > 0) ? (remote_count / thread_count) : 0;
	size_t remainder = (thread_count > 0) ? (remote_count % thread_count) : 0;

	ConnectionPoolsPtr pools;
	bool do_init = true;

	/// Loop over the shards.
	size_t current_thread = 0;
	for (const auto & shard_info : cluster.getShardsInfo())
	{
		if (shard_info.isLocal())
		{
			/// Add queries to the local ClickHouse.

			DB::Context new_context = context;
			new_context.setSettings(new_settings);

			for (const auto & address : shard_info.local_addresses)
			{
				InterpreterDescribeQuery interpreter(ast, new_context);
				BlockInputStreamPtr stream = new MaterializingBlockInputStream(interpreter.execute().in);
				stream = new BlockExtraInfoInputStream(stream, toBlockExtraInfo(address));
				res.emplace_back(stream);
			}
		}

		if (shard_info.hasRemoteConnections())
		{
			size_t excess = (current_thread < remainder) ? 1 : 0;
			size_t actual_pools_per_thread = pools_per_thread + excess;

			if (actual_pools_per_thread == 1)
			{
				auto stream = new RemoteBlockInputStream{shard_info.pool, query, &new_settings, throttler};
				stream->doBroadcast();
				stream->appendExtraInfo();
				res.emplace_back(stream);
				++current_thread;
			}
			else
			{
				if (do_init)
				{
					pools = new ConnectionPools;
					do_init = false;
				}

				pools->push_back(shard_info.pool);
				if (pools->size() == actual_pools_per_thread)
				{
					auto stream = new RemoteBlockInputStream{pools, query, &new_settings, throttler};
					stream->doBroadcast();
					stream->appendExtraInfo();
					res.emplace_back(stream);

					do_init = true;
					++current_thread;
				}
			}
		}
	}

	return res;
}


NameAndTypePair StorageDistributed::getColumn(const String & column_name) const
{
	if (const auto & type = VirtualColumnFactory::tryGetType(column_name))
		return { column_name, type };

	return getRealColumn(column_name);
}


bool StorageDistributed::hasColumn(const String & column_name) const
{
	return VirtualColumnFactory::hasColumn(column_name) || IStorage::hasColumn(column_name);
}


void StorageDistributed::createDirectoryMonitor(const std::string & name)
{
	directory_monitors.emplace(name, std::make_unique<DirectoryMonitor>(*this, name));
}
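

/// Start a DirectoryMonitor for every existing subdirectory of `path`.
/// (Presumably each subdirectory holds INSERT data queued for one destination, which the monitor
/// ships to the corresponding remote shard; see DirectoryMonitor.h.)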
void StorageDistributed::createDirectoryMonitors()
{
	if (path.empty())
		return;

	Poco::File{path}.createDirectory();

	Poco::DirectoryIterator end;
	for (Poco::DirectoryIterator it{path}; it != end; ++it)
		if (it->isDirectory())
			createDirectoryMonitor(it.name());
}


void StorageDistributed::requireDirectoryMonitor(const std::string & name)
{
	if (!directory_monitors.count(name))
		createDirectoryMonitor(name);
}


size_t StorageDistributed::getShardCount() const
{
	return cluster.getRemoteShardCount();
}

}