Revert "Merge pull request #51805 from ClickHouse/fix_for_parallel_replicas_and_empty_header"
This reverts commit 2c592a6a2c, reversing changes made to 7a593ed9a5.
parent 627e924a1b
commit 9a5f357412
@@ -7189,10 +7189,7 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
     if (query_context->canUseParallelReplicasOnInitiator() && to_stage >= QueryProcessingStage::WithMergeableState)
     {
-        if (!canUseParallelReplicasBasedOnPKAnalysis(query_context, storage_snapshot, query_info))
-        {
-            query_info.parallel_replicas_disabled = true;
-            return QueryProcessingStage::Enum::FetchColumns;
-        }
-
         /// ReplicatedMergeTree
         if (supportsReplication())
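For context, the hunk above removes the PK-analysis guard that #51805 added to getQueryProcessingStage(): when canUseParallelReplicasBasedOnPKAnalysis() concluded parallel replicas would not pay off, the query was marked via query_info.parallel_replicas_disabled and processing fell back to the FetchColumns stage. A minimal standalone sketch of that control flow, using simplified stand-in types and names rather than ClickHouse's actual classes:

    #include <iostream>

    // Simplified stand-ins for the real ClickHouse types; names are illustrative only.
    enum class Stage { FetchColumns, WithMergeableState };

    struct QueryInfoSketch
    {
        bool parallel_replicas_disabled = false;   // the flag this revert removes from SelectQueryInfo
    };

    // Hypothetical stand-in for canUseParallelReplicasBasedOnPKAnalysis().
    static bool pk_analysis_allows_parallel_replicas(bool enough_granules_to_parallelize)
    {
        return enough_granules_to_parallelize;
    }

    // Sketch of the decision the reverted code made inside getQueryProcessingStage().
    static Stage choose_stage(bool can_use_parallel_replicas_on_initiator,
                              bool enough_granules_to_parallelize,
                              QueryInfoSketch & query_info)
    {
        if (can_use_parallel_replicas_on_initiator)
        {
            if (!pk_analysis_allows_parallel_replicas(enough_granules_to_parallelize))
            {
                query_info.parallel_replicas_disabled = true;  // remembered so read() skips parallel reading
                return Stage::FetchColumns;                    // fall back to plain column fetch
            }
            return Stage::WithMergeableState;
        }
        return Stage::FetchColumns;
    }

    int main()
    {
        QueryInfoSketch query_info;
        Stage stage = choose_stage(/*initiator*/ true, /*enough granules*/ false, query_info);
        std::cout << "parallel_replicas_disabled=" << query_info.parallel_replicas_disabled
                  << ", stage=" << (stage == Stage::FetchColumns ? "FetchColumns" : "WithMergeableState") << '\n';
    }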
@@ -255,8 +255,6 @@ struct SelectQueryInfo
     Block minmax_count_projection_block;
     MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;
 
-    bool parallel_replicas_disabled = false;
-
     bool is_parameterized_view = false;
     NameToNameMap parameterized_view_values;
 
@@ -209,9 +209,7 @@ void StorageMergeTree::read(
     size_t max_block_size,
     size_t num_streams)
 {
-    if (!query_info.parallel_replicas_disabled &&
-        local_context->canUseParallelReplicasOnInitiator() &&
-        local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
+    if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
     {
         auto table_id = getStorageID();
@@ -242,10 +240,7 @@ void StorageMergeTree::read(
     }
     else
     {
-        const bool enable_parallel_reading =
-            !query_info.parallel_replicas_disabled &&
-            local_context->canUseParallelReplicasOnFollower() &&
-            local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
+        const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
 
         if (auto plan = reader.read(
             column_names, storage_snapshot, query_info,
@@ -1,4 +0,0 @@
--- count() ------------------------------
-2
--- count() with parallel replicas -------
-2
@@ -1,24 +0,0 @@
-DROP TABLE IF EXISTS users;
-CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid;
-
-INSERT INTO users VALUES (111, 'JFK', 33);
-INSERT INTO users VALUES (6666, 'KLM', 48);
-INSERT INTO users VALUES (88888, 'AMS', 50);
-
-SELECT '-- count() ------------------------------';
-SELECT count() FROM users PREWHERE uid > 2000;
-
--- enable parallel replicas but with high granules threshold
-SET
-    skip_unavailable_shards=1,
-    allow_experimental_parallel_reading_from_replicas=1,
-    max_parallel_replicas=3,
-    use_hedged_requests=0,
-    cluster_for_parallel_replicas='parallel_replicas',
-    parallel_replicas_for_non_replicated_merge_tree=1,
-    parallel_replicas_min_number_of_granules_to_enable=1000;
-
-SELECT '-- count() with parallel replicas -------';
-SELECT count() FROM users PREWHERE uid > 2000;
-
-DROP TABLE users;