Merge pull request #51805 from ClickHouse/fix_for_parallel_replicas_and_empty_header

Fix for parallel replicas not completely disabled by granule count threshold
Alexey Milovidov 2023-07-06 01:46:52 +03:00 committed by GitHub
commit 2c592a6a2c
5 changed files with 40 additions and 2 deletions


@@ -7196,7 +7196,10 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
     if (query_context->canUseParallelReplicasOnInitiator() && to_stage >= QueryProcessingStage::WithMergeableState)
     {
         if (!canUseParallelReplicasBasedOnPKAnalysis(query_context, storage_snapshot, query_info))
+        {
+            query_info.parallel_replicas_disabled = true;
             return QueryProcessingStage::Enum::FetchColumns;
+        }

         /// ReplicatedMergeTree
         if (supportsReplication())
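
The hunk above, together with the new SelectQueryInfo member in the next hunk, makes the early FetchColumns fallback record why it happened instead of silently returning. A minimal standalone sketch of that planning-side step, using simplified hypothetical types rather than the real ClickHouse classes, could look like this:

// Planning-side sketch (hypothetical simplified types, not the real ClickHouse
// classes): when the PK/granule analysis rejects parallel replicas, remember
// the decision in the query info instead of only returning early.
#include <cstddef>
#include <iostream>

enum class Stage { FetchColumns, WithMergeableState };

struct QueryInfoSketch
{
    bool parallel_replicas_disabled = false;  // mirrors the new SelectQueryInfo field
};

// Stand-in for canUseParallelReplicasBasedOnPKAnalysis(): too few granules -> reject.
static bool pkAnalysisAllowsParallelReplicas(std::size_t granules_to_read, std::size_t min_granules_threshold)
{
    return granules_to_read >= min_granules_threshold;
}

static Stage chooseProcessingStage(QueryInfoSketch & query_info, std::size_t granules_to_read, std::size_t min_granules_threshold)
{
    if (!pkAnalysisAllowsParallelReplicas(granules_to_read, min_granules_threshold))
    {
        query_info.parallel_replicas_disabled = true;  // the new bookkeeping added by this commit
        return Stage::FetchColumns;                    // the pre-existing early return
    }
    return Stage::WithMergeableState;
}

int main()
{
    QueryInfoSketch query_info;
    chooseProcessingStage(query_info, /*granules_to_read=*/ 3, /*min_granules_threshold=*/ 1000);
    std::cout << std::boolalpha << query_info.parallel_replicas_disabled << '\n';  // prints: true
}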


@@ -255,6 +255,8 @@ struct SelectQueryInfo
     Block minmax_count_projection_block;
     MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;

+    bool parallel_replicas_disabled = false;
+
     bool is_parameterized_view = false;
     NameToNameMap parameterized_view_values;


@@ -209,7 +209,9 @@ void StorageMergeTree::read(
     size_t max_block_size,
     size_t num_streams)
 {
-    if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
+    if (!query_info.parallel_replicas_disabled &&
+        local_context->canUseParallelReplicasOnInitiator() &&
+        local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
     {
         auto table_id = getStorageID();
@@ -240,7 +242,10 @@ void StorageMergeTree::read(
     }
     else
     {
-        const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
+        const bool enable_parallel_reading =
+            !query_info.parallel_replicas_disabled &&
+            local_context->canUseParallelReplicasOnFollower() &&
+            local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;

         if (auto plan = reader.read(
             column_names, storage_snapshot, query_info,
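
The two StorageMergeTree::read hunks above are the consumer side of the new flag: both the initiator branch and the follower branch now refuse parallel reading once the planner has vetoed it, whatever the session settings say. A corresponding sketch, again with simplified hypothetical names:

// Read-side sketch (hypothetical simplified types): the veto recorded in the
// query info wins over the session-level switches on both branches.
#include <iostream>

struct QueryInfoSketch
{
    bool parallel_replicas_disabled = false;
};

struct SessionSketch
{
    bool can_use_parallel_replicas = false;                        // initiator/follower capability check
    bool parallel_replicas_for_non_replicated_merge_tree = false;  // named after the setting it models
};

static bool enableParallelReading(const QueryInfoSketch & query_info, const SessionSketch & session)
{
    return !query_info.parallel_replicas_disabled
        && session.can_use_parallel_replicas
        && session.parallel_replicas_for_non_replicated_merge_tree;
}

int main()
{
    const SessionSketch session{true, true};
    const QueryInfoSketch vetoed{true};
    const QueryInfoSketch allowed{};  // planner did not object

    std::cout << std::boolalpha
              << enableParallelReading(vetoed, session) << ' '     // false: the planning-time decision sticks
              << enableParallelReading(allowed, session) << '\n';  // true
}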


@@ -0,0 +1,4 @@
+-- count() ------------------------------
+2
+-- count() with parallel replicas -------
+2


@@ -0,0 +1,24 @@
+DROP TABLE IF EXISTS users;
+CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid;
+
+INSERT INTO users VALUES (111, 'JFK', 33);
+INSERT INTO users VALUES (6666, 'KLM', 48);
+INSERT INTO users VALUES (88888, 'AMS', 50);
+
+SELECT '-- count() ------------------------------';
+SELECT count() FROM users PREWHERE uid > 2000;
+
+-- enable parallel replicas but with high granules threshold
+SET
+skip_unavailable_shards=1,
+allow_experimental_parallel_reading_from_replicas=1,
+max_parallel_replicas=3,
+use_hedged_requests=0,
+cluster_for_parallel_replicas='parallel_replicas',
+parallel_replicas_for_non_replicated_merge_tree=1,
+parallel_replicas_min_number_of_granules_to_enable=1000;
+
+SELECT '-- count() with parallel replicas -------';
+SELECT count() FROM users PREWHERE uid > 2000;
+
+DROP TABLE users;
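
Why a threshold of 1000 granules disables parallel replicas for this test: each of the three single-row INSERTs creates its own part, a part always has at least one granule, and (assuming the default index_granularity of 8192) the whole table therefore amounts to roughly three granules, far below the threshold. A quick back-of-the-envelope check:

// Rough granule estimate for the tiny test table above. Assumes the MergeTree
// default index_granularity of 8192; the real per-part granule accounting in
// ClickHouse has more detail, this is only the order-of-magnitude argument.
#include <cstdint>
#include <iostream>

int main()
{
    const std::uint64_t index_granularity = 8192;  // default rows per granule
    const std::uint64_t parts = 3;                 // one part per single-row INSERT
    const std::uint64_t rows_per_part = 1;

    const std::uint64_t granules_per_part = (rows_per_part + index_granularity - 1) / index_granularity;  // = 1
    const std::uint64_t total_granules = parts * granules_per_part;                                        // = 3

    std::cout << total_granules << " granules, threshold is 1000\n";
}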