Parallel replicas: friendly settings (#57542)

Author: Igor Nikonov
Date: 2023-12-13 14:42:06 +01:00 (committed by GitHub)
Parent: de4afd3b54
Commit: c165be76ab
34 changed files with 114 additions and 72 deletions
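In short: parallel-replicas queries no longer need use_hedged_requests=0 set by hand (the server now disables hedged requests itself and logs that it did), and an empty cluster_for_parallel_replicas fails fast instead of proceeding. A minimal before/after sketch, assuming a table t and the 'parallel_replicas' cluster used by the tests below:

SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1;
SET cluster_for_parallel_replicas='';
SELECT count() FROM t; -- { serverError CLUSTER_DOESNT_EXIST }
SET cluster_for_parallel_replicas='parallel_replicas';
SELECT count() FROM t; -- works; hedged requests are disabled internally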


@@ -151,7 +151,7 @@ function run_tests()
 set +e
 if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then
-clickhouse-test --client="clickhouse-client --use_hedged_requests=0 --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \
+clickhouse-test --client="clickhouse-client --allow_experimental_parallel_reading_from_replicas=1 --parallel_replicas_for_non_replicated_merge_tree=1 \
 --max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \
 -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
 "$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt


@@ -30,6 +30,7 @@ namespace ErrorCodes
 {
 extern const int TOO_LARGE_DISTRIBUTED_DEPTH;
 extern const int LOGICAL_ERROR;
+extern const int CLUSTER_DOESNT_EXIST;
 }
 namespace ClusterProxy
@@ -322,11 +323,44 @@ void executeQueryWithParallelReplicas(
 SelectStreamFactory & stream_factory,
 const ASTPtr & query_ast,
 ContextPtr context,
-std::shared_ptr<const StorageLimitsList> storage_limits,
-const ClusterPtr & not_optimized_cluster)
+std::shared_ptr<const StorageLimitsList> storage_limits)
 {
 const auto & settings = context->getSettingsRef();
+/// check cluster for parallel replicas
+if (settings.cluster_for_parallel_replicas.value.empty())
+{
+throw Exception(
+ErrorCodes::CLUSTER_DOESNT_EXIST,
+"Reading in parallel from replicas is enabled but cluster to execute query is not provided. Please set "
+"'cluster_for_parallel_replicas' setting");
+}
+auto not_optimized_cluster = context->getCluster(settings.cluster_for_parallel_replicas);
 auto new_context = Context::createCopy(context);
+/// check hedged connections setting
+if (settings.use_hedged_requests.value)
+{
+if (settings.use_hedged_requests.changed)
+{
+LOG_WARNING(
+&Poco::Logger::get("executeQueryWithParallelReplicas"),
+"Setting 'use_hedged_requests' explicitly with enabled 'allow_experimental_parallel_reading_from_replicas' has no effect. "
+"Hedged connections are not used for parallel reading from replicas");
+}
+else
+{
+LOG_INFO(
+&Poco::Logger::get("executeQueryWithParallelReplicas"),
+"Disabling 'use_hedged_requests' in favor of 'allow_experimental_parallel_reading_from_replicas'. Hedged connections are "
+"not used for parallel reading from replicas");
+}
+/// disable hedged connections -> parallel replicas uses own logic to choose replicas
+new_context->setSetting("use_hedged_requests", Field{false});
+}
 auto scalars = new_context->hasQueryContext() ? new_context->getQueryContext()->getScalars() : Scalars{};
 UInt64 shard_num = 0; /// shard_num is 1-based, so 0 - no shard specified
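The two log branches above differ only in whether the user set use_hedged_requests explicitly. A sketch of both cases, assuming a table t on the 'parallel_replicas' cluster (illustrative):

-- Setting left at its default: disabled silently, logged at Information level.
SELECT count() FROM t
SETTINGS allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=3,
cluster_for_parallel_replicas='parallel_replicas';

-- Set explicitly: it still has no effect, but a Warning makes the conflict visible.
SELECT count() FROM t
SETTINGS allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=3,
cluster_for_parallel_replicas='parallel_replicas', use_hedged_requests=1;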


@@ -71,8 +71,7 @@ void executeQueryWithParallelReplicas(
 SelectStreamFactory & stream_factory,
 const ASTPtr & query_ast,
 ContextPtr context,
-std::shared_ptr<const StorageLimitsList> storage_limits,
-const ClusterPtr & not_optimized_cluster);
+std::shared_ptr<const StorageLimitsList> storage_limits);
 }
 }


@@ -5020,7 +5020,7 @@ Context::ParallelReplicasMode Context::getParallelReplicasMode() const
 if (!settings_ref.parallel_replicas_custom_key.value.empty())
 return CUSTOM_KEY;
-if (settings_ref.allow_experimental_parallel_reading_from_replicas > 0 && !settings_ref.use_hedged_requests)
+if (settings_ref.allow_experimental_parallel_reading_from_replicas > 0)
 return READ_TASKS;
 return SAMPLE_KEY;
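Note the precedence here: a non-empty parallel_replicas_custom_key still selects CUSTOM_KEY before READ_TASKS is considered; the change only drops the !use_hedged_requests guard from the READ_TASKS branch. A hedged sketch (table and key expression are illustrative):

-- Runs in CUSTOM_KEY mode even though parallel reading from replicas is enabled:
SELECT count() FROM t
SETTINGS max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1,
parallel_replicas_custom_key='cityHash64(x)';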


@@ -211,17 +211,12 @@ void StorageMergeTree::read(
 {
 if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
 {
-auto table_id = getStorageID();
+const auto table_id = getStorageID();
 const auto & modified_query_ast = ClusterProxy::rewriteSelectQuery(
 local_context, query_info.query,
 table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr);
-String cluster_for_parallel_replicas = local_context->getSettingsRef().cluster_for_parallel_replicas;
-auto cluster = local_context->getCluster(cluster_for_parallel_replicas);
 Block header;
 if (local_context->getSettingsRef().allow_experimental_analyzer)
 header = InterpreterSelectQueryAnalyzer::getSampleBlock(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze());
 else
@@ -240,17 +235,22 @@ void StorageMergeTree::read(
 select_stream_factory,
 modified_query_ast,
 local_context,
-query_info.storage_limits,
-cluster);
+query_info.storage_limits);
 }
 else
 {
 const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
 if (auto plan = reader.read(
-column_names, storage_snapshot, query_info,
-local_context, max_block_size, num_streams,
-processed_stage, nullptr, enable_parallel_reading))
+column_names,
+storage_snapshot,
+query_info,
+local_context,
+max_block_size,
+num_streams,
+processed_stage,
+nullptr,
+enable_parallel_reading))
 query_plan = std::move(*plan);
 }
 }
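As the condition above shows, a plain (non-replicated) MergeTree takes the parallel-replicas initiator path only with the parallel_replicas_for_non_replicated_merge_tree opt-in; otherwise it falls back to a regular reader.read(). A minimal sketch (table name illustrative):

CREATE TABLE plain_mt (x UInt64) ENGINE=MergeTree() ORDER BY x;
INSERT INTO plain_mt SELECT * FROM numbers(10);
SELECT count() FROM plain_mt
SETTINGS allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3,
cluster_for_parallel_replicas='parallel_replicas',
parallel_replicas_for_non_replicated_merge_tree=1;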


@@ -5338,7 +5338,7 @@ void StorageReplicatedMergeTree::read(
 return readLocalSequentialConsistencyImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
 if (local_context->canUseParallelReplicasOnInitiator())
-return readParallelReplicasImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
+return readParallelReplicasImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage);
 readLocalImpl(query_plan, column_names, storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
 }
@@ -5367,18 +5367,11 @@ void StorageReplicatedMergeTree::readParallelReplicasImpl(
 const StorageSnapshotPtr & storage_snapshot,
 SelectQueryInfo & query_info,
 ContextPtr local_context,
-QueryProcessingStage::Enum processed_stage,
-const size_t /*max_block_size*/,
-const size_t /*num_streams*/)
+QueryProcessingStage::Enum processed_stage)
 {
-auto table_id = getStorageID();
 auto scalars = local_context->hasQueryContext() ? local_context->getQueryContext()->getScalars() : Scalars{};
-String cluster_for_parallel_replicas = local_context->getSettingsRef().cluster_for_parallel_replicas;
-auto parallel_replicas_cluster = local_context->getCluster(cluster_for_parallel_replicas);
 ASTPtr modified_query_ast;
 Block header;
 if (local_context->getSettingsRef().allow_experimental_analyzer)
 {
 auto modified_query_tree = buildQueryTreeForShard(query_info, query_info.query_tree);
@@ -5389,6 +5382,7 @@ void StorageReplicatedMergeTree::readParallelReplicasImpl(
 }
 else
 {
+const auto table_id = getStorageID();
 modified_query_ast = ClusterProxy::rewriteSelectQuery(local_context, query_info.query,
 table_id.database_name, table_id.table_name, /*remote_table_function_ptr*/nullptr);
 header
@@ -5407,8 +5401,7 @@ void StorageReplicatedMergeTree::readParallelReplicasImpl(
 select_stream_factory,
 modified_query_ast,
 local_context,
-query_info.storage_limits,
-parallel_replicas_cluster);
+query_info.storage_limits);
 }
 void StorageReplicatedMergeTree::readLocalImpl(


@@ -582,9 +582,7 @@ private:
 const StorageSnapshotPtr & storage_snapshot,
 SelectQueryInfo & query_info,
 ContextPtr local_context,
-QueryProcessingStage::Enum processed_stage,
-size_t max_block_size,
-size_t num_streams);
+QueryProcessingStage::Enum processed_stage);
 template <class Func>
 void foreachActiveParts(Func && func, bool select_sequential_consistency) const;


@@ -119,7 +119,6 @@ def test_read_equally_from_each_replica(start_cluster, prefer_localhost_replica)
 "allow_experimental_parallel_reading_from_replicas": 2,
 "prefer_localhost_replica": prefer_localhost_replica,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 },
 )
 == expected_result


@@ -84,7 +84,6 @@ def test_skip_unavailable_shards(start_cluster, prefer_localhost_replica):
 settings={
 "allow_experimental_parallel_reading_from_replicas": 2,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 "prefer_localhost_replica": prefer_localhost_replica,
 "skip_unavailable_shards": 1,
 "connections_with_failover_max_tries": 0, # just don't wait for unavailable replicas
@@ -119,7 +118,6 @@ def test_error_on_unavailable_shards(start_cluster, prefer_localhost_replica):
 settings={
 "allow_experimental_parallel_reading_from_replicas": 2,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 "prefer_localhost_replica": prefer_localhost_replica,
 "skip_unavailable_shards": 0,
 },
@@ -155,7 +153,6 @@ def test_no_unavailable_shards(start_cluster, skip_unavailable_shards):
 settings={
 "allow_experimental_parallel_reading_from_replicas": 2,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 "prefer_localhost_replica": 0,
 "skip_unavailable_shards": skip_unavailable_shards,
 },


@@ -137,7 +137,6 @@ def test_parallel_replicas_over_distributed(
 "allow_experimental_parallel_reading_from_replicas": 2,
 "prefer_localhost_replica": prefer_localhost_replica,
 "max_parallel_replicas": max_parallel_replicas,
-"use_hedged_requests": 0,
 },
 )
 == expected_result


@@ -38,7 +38,6 @@ def test_skip_unavailable_shards(start_cluster):
 settings={
 "allow_experimental_parallel_reading_from_replicas": 2,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 "skip_unavailable_shards": 1,
 # "async_socket_for_remote" : 0,
 # "async_query_sending_for_remote" : 0,
@@ -65,7 +64,6 @@ def test_error_on_unavailable_shards(start_cluster):
 settings={
 "allow_experimental_parallel_reading_from_replicas": 2,
 "max_parallel_replicas": 3,
-"use_hedged_requests": 0,
 "skip_unavailable_shards": 0,
 },
 )


@@ -17,7 +17,6 @@ opts=(
 --allow_experimental_parallel_reading_from_replicas 1
 --parallel_replicas_for_non_replicated_merge_tree 1
 --max_parallel_replicas 3
---use_hedged_requests 0
 --cluster_for_parallel_replicas parallel_replicas
 --iterations 1


@@ -23,7 +23,6 @@ SET allow_experimental_analyzer = 0;
 SET max_parallel_replicas = 3;
 SET prefer_localhost_replica = 1;
 SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost';
-SET use_hedged_requests = 0;
 SET joined_subquery_requires_alias = 0;
 SELECT '=============== INNER QUERY (NO PARALLEL) ===============';


@@ -18,7 +18,7 @@ INSERT INTO join_inner_table__fuzz_1 SELECT
 FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2)
 LIMIT 100;
-SET max_parallel_replicas = 3, prefer_localhost_replica = 1, use_hedged_requests = 0, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1;
+SET max_parallel_replicas = 3, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', allow_experimental_parallel_reading_from_replicas = 1;
 -- SELECT query will write a Warning to the logs
 SET send_logs_level='error';


@@ -1,7 +1,7 @@
 CREATE TABLE IF NOT EXISTS parallel_replicas_plain (x String) ENGINE=MergeTree() ORDER BY x;
 INSERT INTO parallel_replicas_plain SELECT toString(number) FROM numbers(10);
-SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas';
+SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas';
 SET send_logs_level='error';
 SET parallel_replicas_for_non_replicated_merge_tree = 0;


@@ -2,7 +2,7 @@ CREATE TABLE IF NOT EXISTS parallel_replicas_final (x String) ENGINE=ReplacingMe
 INSERT INTO parallel_replicas_final SELECT toString(number) FROM numbers(10);
-SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas';
+SET max_parallel_replicas=3, allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas';
 SET parallel_replicas_for_non_replicated_merge_tree = 1;
 SELECT * FROM parallel_replicas_final FINAL FORMAT Null;


@@ -4,7 +4,7 @@ INSERT INTO test_parallel_replicas_unavailable_shards SELECT * FROM numbers(10);
 SYSTEM FLUSH LOGS;
-SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=11, use_hedged_requests=0, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1;
+SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=11, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1;
 SET send_logs_level='error';
 SELECT count() FROM test_parallel_replicas_unavailable_shards WHERE NOT ignore(*);


@@ -8,5 +8,5 @@
 5935810273536892891
 7885388429666205427
 8124171311239967992
-1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1,\n use_hedged_requests = 0;
-0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1, use_hedged_requests = 0
+1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1;
+0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1


@@ -24,8 +24,7 @@ FROM join_inner_table__fuzz_146_replicated
 allow_experimental_analyzer = 1,
 max_parallel_replicas = 2,
 cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
-allow_experimental_parallel_reading_from_replicas = 1,
-use_hedged_requests = 0;
+allow_experimental_parallel_reading_from_replicas = 1;
 SYSTEM FLUSH LOGS;
 -- There should be 2 different queries


@@ -30,7 +30,6 @@ function run_query_with_pure_parallel_replicas () {
 --query_id "${1}_pure" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --allow_experimental_analyzer 0
@@ -40,7 +39,6 @@ function run_query_with_pure_parallel_replicas () {
 --query_id "${1}_pure_analyzer" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas 'test_cluster_one_shard_three_replicas_localhost' \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --allow_experimental_analyzer 1
@@ -56,7 +54,6 @@ function run_query_with_custom_key_parallel_replicas () {
 --query "$2" \
 --query_id "${1}_custom_key" \
 --max_parallel_replicas 3 \
---use_hedged_requests 0 \
 --parallel_replicas_custom_key_filter_type 'default' \
 --parallel_replicas_custom_key "$2" \
 --allow_experimental_analyzer 0
@@ -65,7 +62,6 @@ function run_query_with_custom_key_parallel_replicas () {
 --query "$2" \
 --query_id "${1}_custom_key_analyzer" \
 --max_parallel_replicas 3 \
---use_hedged_requests 0 \
 --parallel_replicas_custom_key_filter_type 'default' \
 --parallel_replicas_custom_key "$2" \
 --allow_experimental_analyzer 1


@@ -49,7 +49,6 @@ function run_query_with_pure_parallel_replicas () {
 --query_id "${1}_pure" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas "parallel_replicas" \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --parallel_replicas_for_non_replicated_merge_tree 1 \


@@ -64,7 +64,6 @@ function run_query_with_pure_parallel_replicas () {
 --query_id "${1}_pure" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas "parallel_replicas" \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --parallel_replicas_for_non_replicated_merge_tree 1 \


@@ -13,7 +13,6 @@ SET
 skip_unavailable_shards=1,
 allow_experimental_parallel_reading_from_replicas=1,
 max_parallel_replicas=3,
-use_hedged_requests=0,
 cluster_for_parallel_replicas='parallel_replicas',
 parallel_replicas_for_non_replicated_merge_tree=1,
 parallel_replicas_min_number_of_rows_per_replica=1000;


@@ -14,13 +14,13 @@ insert into test select *, today() from numbers(100);
 SELECT count(), min(id), max(id), avg(id)
 FROM test_d
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0;
+SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1;
 insert into test select *, today() from numbers(100);
 SELECT count(), min(id), max(id), avg(id)
 FROM test_d
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0;
+SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1;
 -- 2 shards
@@ -38,10 +38,10 @@ insert into test2 select *, today() from numbers(100);
 SELECT count(), min(id), max(id), avg(id)
 FROM test2_d
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0;
+SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1;
 insert into test2 select *, today() from numbers(100);
 SELECT count(), min(id), max(id), avg(id)
 FROM test2_d
-SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1, use_hedged_requests=0;
+SETTINGS allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree=1;


@@ -36,7 +36,6 @@ echo "
 cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
 allow_experimental_parallel_reading_from_replicas = 2,
 parallel_replicas_for_non_replicated_merge_tree = 1,
-use_hedged_requests = 0,
 interactive_delay=0
 "\
 | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query_id=${query_id_base}_interactive_0" --data-binary @- -vvv 2>&1 \
@@ -51,7 +50,6 @@ echo "
 cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
 allow_experimental_parallel_reading_from_replicas = 2,
 parallel_replicas_for_non_replicated_merge_tree = 1,
-use_hedged_requests = 0,
 interactive_delay=99999999999
 "\
 | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&wait_end_of_query=1&query_id=${query_id_base}_interactive_high" --data-binary @- -vvv 2>&1 \


@@ -2,5 +2,5 @@
 DROP TABLE IF EXISTS set_index__fuzz_41;
 CREATE TABLE set_index__fuzz_41 (`a` Date, `b` Nullable(DateTime64(3)), INDEX b_set b TYPE set(0) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple();
 INSERT INTO set_index__fuzz_41 (a) VALUES (today());
-SELECT b FROM set_index__fuzz_41 WHERE and(b = 256) SETTINGS force_data_skipping_indices = 'b_set', optimize_move_to_prewhere = 0, max_parallel_replicas=2, parallel_replicas_for_non_replicated_merge_tree=1, allow_experimental_parallel_reading_from_replicas=2, use_hedged_requests=0; -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION }
+SELECT b FROM set_index__fuzz_41 WHERE and(b = 256) SETTINGS force_data_skipping_indices = 'b_set', optimize_move_to_prewhere = 0, max_parallel_replicas=2, parallel_replicas_for_non_replicated_merge_tree=1, allow_experimental_parallel_reading_from_replicas=2; -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION }
 DROP TABLE set_index__fuzz_41;


@@ -24,5 +24,5 @@ system sync replica t3;
 SELECT count(), min(k), max(k), avg(k)
 FROM t1
-SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, prefer_localhost_replica = 0, use_hedged_requests=0,
+SETTINGS allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, prefer_localhost_replica = 0,
 cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_single_task_marks_count_multiplier = 0.001;


@@ -2,7 +2,7 @@ DROP TABLE IF EXISTS tt;
 CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
 INSERT INTO tt SELECT * FROM numbers(10);
-SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1;
+SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1;
 SELECT count() FROM clusterAllReplicas('test_cluster_two_shard_three_replicas_localhost', currentDatabase(), tt) settings log_comment='02875_190aed82-2423-413b-ad4c-24dcca50f65b';
 SYSTEM FLUSH LOGS;


@@ -2,7 +2,7 @@ DROP TABLE IF EXISTS tt;
 CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
 INSERT INTO tt SELECT * FROM numbers(10);
-SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1;
+SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1;
 SELECT count() FROM remote('127.0.0.{1..6}', currentDatabase(), tt) settings log_comment='02875_89f3c39b-1919-48cb-b66e-ef9904e73146';
 SYSTEM FLUSH LOGS;


@@ -14,7 +14,7 @@ system sync replica t1;
 system sync replica t2;
 system sync replica t3;
-SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, use_hedged_requests=0, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
+SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
 -- default coordinator
 SELECT count(), min(k), max(k), avg(k) FROM t1 SETTINGS log_comment='02898_default_190aed82-2423-413b-ad4c-24dcca50f65b';


@@ -29,7 +29,6 @@ $CLICKHOUSE_CLIENT \
 --query_id "${query_id}" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas "parallel_replicas" \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --parallel_replicas_for_non_replicated_merge_tree 1 \
@@ -63,7 +62,6 @@ $CLICKHOUSE_CLIENT \
 --query_id "${query_id}" \
 --max_parallel_replicas 3 \
 --prefer_localhost_replica 1 \
---use_hedged_requests 0 \
 --cluster_for_parallel_replicas "parallel_replicas" \
 --allow_experimental_parallel_reading_from_replicas 1 \
 --parallel_replicas_for_non_replicated_merge_tree 1 \


@@ -0,0 +1,4 @@
+10
+1
+10
+1


@@ -0,0 +1,35 @@
+DROP TABLE IF EXISTS test_parallel_replicas_settings;
+CREATE TABLE test_parallel_replicas_settings (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
+INSERT INTO test_parallel_replicas_settings SELECT * FROM numbers(10);
+SET allow_experimental_parallel_reading_from_replicas=2, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1;
+SET cluster_for_parallel_replicas='';
+SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*); -- { serverError CLUSTER_DOESNT_EXIST }
+SET cluster_for_parallel_replicas='parallel_replicas';
+SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f';
+SYSTEM FLUSH LOGS;
+SELECT count() > 0 FROM system.text_log
+WHERE yesterday() <= event_date
+AND query_id in (select query_id from system.query_log where current_database=currentDatabase() AND log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f')
+AND level = 'Information'
+AND message ILIKE '%Disabling ''use_hedged_requests'' in favor of ''allow_experimental_parallel_reading_from_replicas''%'
+SETTINGS allow_experimental_parallel_reading_from_replicas=0;
+SET use_hedged_requests=1;
+SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f';
+SYSTEM FLUSH LOGS;
+SET allow_experimental_parallel_reading_from_replicas=0;
+SELECT count() > 0 FROM system.text_log
+WHERE yesterday() <= event_date
+AND query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment = '1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f')
+AND level = 'Warning'
+AND message ILIKE '%Setting ''use_hedged_requests'' explicitly with enabled ''allow_experimental_parallel_reading_from_replicas'' has no effect%'
+SETTINGS allow_experimental_parallel_reading_from_replicas=0;
+DROP TABLE test_parallel_replicas_settings;


@@ -31,7 +31,7 @@ test1() {
 GROUP BY CounterID, URL, EventDate
 ORDER BY URL, EventDate
 LIMIT 5 OFFSET 10
-SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0"
+SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3"
 check_replicas_read_in_order $query_id
 }
@@ -48,7 +48,7 @@ test2() {
 GROUP BY URL, EventDate
 ORDER BY URL, EventDate
 LIMIT 5 OFFSET 10
-SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0, query_plan_aggregation_in_order = 1"
+SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, query_plan_aggregation_in_order = 1"
 check_replicas_read_in_order $query_id
 }
@@ -64,7 +64,7 @@ test3() {
 FROM test.hits
 WHERE CounterID = 1704509 AND UserID = 4322253409885123546
 GROUP BY URL, EventDate
-SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, use_hedged_requests = 0
+SETTINGS optimize_aggregation_in_order = 1, enable_memory_bound_merging_of_aggregation_results = 1, allow_experimental_parallel_reading_from_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3
 )
 WHERE explain LIKE '%Aggr%Transform%' OR explain LIKE '%InOrder%'"