mirror of https://github.com/ClickHouse/ClickHouse.git
Split StorageReplicatedMergeTree reading methods
Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
parent 323128df6f
commit 4a33e027c5
src/Storages/StorageReplicatedMergeTree.cpp

@@ -4902,29 +4902,55 @@ void StorageReplicatedMergeTree::read(
         snapshot_data.alter_conversions = {};
     });
 
-    /** The `select_sequential_consistency` setting has two meanings:
-     * 1. To throw an exception if on a replica there are not all parts which have been written down on quorum of remaining replicas.
-     * 2. Do not read parts that have not yet been written to the quorum of the replicas.
-     * For this you have to synchronously go to ZooKeeper.
-     */
-    if (local_context->getSettingsRef().select_sequential_consistency)
-    {
-        auto max_added_blocks = std::make_shared<ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock>(getMaxAddedBlocks());
-        if (auto plan = reader.read(
-                column_names, storage_snapshot, query_info, local_context,
-                max_block_size, num_streams, processed_stage, std::move(max_added_blocks), /*enable_parallel_reading*/false))
-            query_plan = std::move(*plan);
-        return;
-    }
+    const auto & settings = local_context->getSettingsRef();
+
+    /// The `select_sequential_consistency` setting has two meanings:
+    /// 1. To throw an exception if on a replica there are not all parts which have been written down on quorum of remaining replicas.
+    /// 2. Do not read parts that have not yet been written to the quorum of the replicas.
+    /// For this you have to synchronously go to ZooKeeper.
+    if (settings.select_sequential_consistency)
+        return readLocalSequentialConsistencyImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
 
     if (local_context->canUseParallelReplicasOnInitiator())
-    {
+        return readParallelReplicasImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
+
+    readLocalImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
+}
+
+void StorageReplicatedMergeTree::readLocalSequentialConsistencyImpl(
+    QueryPlan & query_plan,
+    const Names & column_names,
+    const StorageSnapshotPtr & storage_snapshot,
+    SelectQueryInfo & query_info,
+    ContextPtr local_context,
+    QueryProcessingStage::Enum processed_stage,
+    size_t max_block_size,
+    size_t num_streams)
+{
+    auto max_added_blocks = std::make_shared<ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock>(getMaxAddedBlocks());
+    auto plan = reader.read(column_names, storage_snapshot, query_info, local_context,
+        max_block_size, num_streams, processed_stage, std::move(max_added_blocks),
+        /* enable_parallel_reading= */false);
+    if (plan)
+        query_plan = std::move(*plan);
+}
+
+void StorageReplicatedMergeTree::readParallelReplicasImpl(
+    QueryPlan & query_plan,
+    const Names & /*column_names*/,
+    const StorageSnapshotPtr & storage_snapshot,
+    SelectQueryInfo & query_info,
+    ContextPtr local_context,
+    QueryProcessingStage::Enum processed_stage,
+    const size_t /*max_block_size*/,
+    const size_t /*num_streams*/)
+{
     auto table_id = getStorageID();
 
+    auto parallel_replicas_cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas);
+
     ASTPtr modified_query_ast;
-
     Block header;
-
     if (local_context->getSettingsRef().allow_experimental_analyzer)
     {
         auto modified_query_tree = buildQueryTreeForShard(query_info, query_info.query_tree);
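The public read() is now a pure dispatcher: it picks one of three strategies and forwards its arguments unchanged to a private implementation. A minimal standalone sketch of that shape (hypothetical types and names, not ClickHouse code); note that `return f();` is legal when both functions return void:

```cpp
#include <iostream>
#include <string>

struct QueryPlan { std::string description; };

class Table
{
public:
    // Entry point: chooses a strategy, never builds a plan itself.
    void read(QueryPlan & plan, bool sequential_consistency, bool parallel_replicas)
    {
        if (sequential_consistency)
            return readSequentialConsistency(plan); // void-to-void return forwarding

        if (parallel_replicas)
            return readParallelReplicas(plan);

        readLocal(plan); // default path
    }

private:
    void readSequentialConsistency(QueryPlan & plan) { plan.description = "quorum-bounded local read"; }
    void readParallelReplicas(QueryPlan & plan) { plan.description = "read spread over replicas"; }
    void readLocal(QueryPlan & plan) { plan.description = "plain local read"; }
};

int main()
{
    QueryPlan plan;
    Table{}.read(plan, /* sequential_consistency= */ false, /* parallel_replicas= */ true);
    std::cout << plan.description << '\n'; // "read spread over replicas"
}
```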
@@ -4941,28 +4967,37 @@ void StorageReplicatedMergeTree::read(
             = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock();
     }
 
-    auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas);
-
-    ClusterProxy::SelectStreamFactory select_stream_factory =
-        ClusterProxy::SelectStreamFactory(
+    ClusterProxy::SelectStreamFactory select_stream_factory = ClusterProxy::SelectStreamFactory(
         header,
         {},
         storage_snapshot,
         processed_stage);
 
     ClusterProxy::executeQueryWithParallelReplicas(
-        query_plan, getStorageID(), /*remove_table_function_ptr*/ nullptr,
+        query_plan, getStorageID(),
+        /* table_func_ptr= */ nullptr,
         select_stream_factory, modified_query_ast,
-        local_context, query_info, cluster);
-    }
-    else
-    {
-        if (auto plan = reader.read(
+        local_context, query_info, parallel_replicas_cluster);
+}
+
+void StorageReplicatedMergeTree::readLocalImpl(
+    QueryPlan & query_plan,
+    const Names & column_names,
+    const StorageSnapshotPtr & storage_snapshot,
+    SelectQueryInfo & query_info,
+    ContextPtr local_context,
+    QueryProcessingStage::Enum processed_stage,
+    const size_t max_block_size,
+    const size_t num_streams)
+{
+    auto plan = reader.read(
         column_names, storage_snapshot, query_info,
         local_context, max_block_size, num_streams,
-        processed_stage, nullptr, /*enable_parallel_reading*/local_context->canUseParallelReplicasOnFollower()))
+        processed_stage,
+        /* max_block_numbers_to_read= */ nullptr,
+        /* enable_parallel_reading= */ local_context->canUseParallelReplicasOnFollower());
+    if (plan)
         query_plan = std::move(*plan);
-    }
 }
 
 template <class Func>
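Both new local implementations end with the same idiom: reader.read() yields an optional plan, and only a non-empty result is moved into the caller-supplied query_plan; otherwise the plan is left untouched. A self-contained sketch of that contract (all types hypothetical):

```cpp
#include <optional>
#include <utility>
#include <vector>

struct QueryPlan { std::vector<int> steps; };

// Hypothetical reader: produces a plan only when there is something to read,
// mirroring the optional-plan result used above.
std::optional<QueryPlan> buildPlan(bool has_parts)
{
    if (!has_parts)
        return std::nullopt; // no data: signal "no plan" rather than an empty one
    return QueryPlan{.steps = {1, 2, 3}};
}

void read(QueryPlan & query_plan, bool has_parts)
{
    auto plan = buildPlan(has_parts);
    if (plan)
        query_plan = std::move(*plan); // move the built plan into the caller's slot
}
```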
src/Storages/StorageReplicatedMergeTree.h

@@ -130,7 +130,7 @@ public:
         const Names & column_names,
         const StorageSnapshotPtr & storage_snapshot,
         SelectQueryInfo & query_info,
-        ContextPtr context,
+        ContextPtr local_context,
         QueryProcessingStage::Enum processed_stage,
         size_t max_block_size,
         size_t num_streams) override;
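The header-side change to read() is just a rename of the declared parameter from context to local_context so it matches the definition. C++ matches declarations to definitions by type alone, so the old mismatch compiled fine; the rename is purely for readers and for tooling that displays the declaration. A minimal illustration (hypothetical function):

```cpp
// Legal C++: parameter names in a declaration and its definition may differ,
// because the signature only depends on the types.
void read(int context);          // declaration: what the header, IDEs, and docs show
void read(int local_context) {}  // definition: what maintainers actually read

int main()
{
    read(42); // same function either way; consistent names just avoid confusion
}
```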
@@ -513,6 +513,36 @@ private:
 
     static std::optional<QueryPipeline> distributedWriteFromClusterStorage(const std::shared_ptr<IStorageCluster> & src_storage_cluster, const ASTInsertQuery & query, ContextPtr context);
 
+    void readLocalImpl(
+        QueryPlan & query_plan,
+        const Names & column_names,
+        const StorageSnapshotPtr & storage_snapshot,
+        SelectQueryInfo & query_info,
+        ContextPtr local_context,
+        QueryProcessingStage::Enum processed_stage,
+        size_t max_block_size,
+        size_t num_streams);
+
+    void readLocalSequentialConsistencyImpl(
+        QueryPlan & query_plan,
+        const Names & column_names,
+        const StorageSnapshotPtr & storage_snapshot,
+        SelectQueryInfo & query_info,
+        ContextPtr local_context,
+        QueryProcessingStage::Enum processed_stage,
+        size_t max_block_size,
+        size_t num_streams);
+
+    void readParallelReplicasImpl(
+        QueryPlan & query_plan,
+        const Names & column_names,
+        const StorageSnapshotPtr & storage_snapshot,
+        SelectQueryInfo & query_info,
+        ContextPtr local_context,
+        QueryProcessingStage::Enum processed_stage,
+        size_t max_block_size,
+        size_t num_streams);
+
     template <class Func>
     void foreachActiveParts(Func && func, bool select_sequential_consistency) const;
 
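One style point that recurs in the .cpp changes: bare nullptr/false arguments become `/* parameter_name= */ value`. A small sketch of the payoff (hypothetical function; bugprone-argument-comment is a real clang-tidy check that verifies such comments against the callee's parameter names):

```cpp
// Hypothetical signature with the same trailing parameters as reader.read().
void read(const int * max_block_numbers_to_read, bool enable_parallel_reading)
{
    (void)max_block_numbers_to_read;
    (void)enable_parallel_reading;
}

int main()
{
    // Positional style: the intent of `nullptr` and `false` is invisible here.
    read(nullptr, false);

    // Argument-comment style: self-documenting at the call site, and
    // clang-tidy's bugprone-argument-comment flags a comment that drifts
    // away from the real parameter name.
    read(/* max_block_numbers_to_read= */ nullptr,
         /* enable_parallel_reading= */ false);
}
```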