mirror of https://github.com/ClickHouse/ClickHouse.git
Fix test: use different log_comment for each test instance
This commit is contained in:
parent 1669377d42
commit da2dc30056
@@ -1,4 +1,5 @@
 import pytest
+import uuid
 from helpers.cluster import ClickHouseCluster
 
 cluster = ClickHouseCluster(__file__)
@@ -51,27 +52,28 @@ def create_tables(cluster, table_name):
     node1.query(
         f"INSERT INTO {table_name} SELECT number % 4, number FROM numbers(1000)"
     )
-    node3.query(
+    node1.query(
         f"INSERT INTO {table_name} SELECT number % 4, number FROM numbers(1000, 1000)"
     )
-    node1.query(f"SYSTEM SYNC REPLICA {table_name}")
+    node1.query(
+        f"INSERT INTO {table_name} SELECT number % 4, number FROM numbers(2000, 1000)"
+    )
+    node1.query(
+        f"INSERT INTO {table_name} SELECT number % 4, number FROM numbers(3000, 1000)"
+    )
     node3.query(f"SYSTEM SYNC REPLICA {table_name}")
 
 
 @pytest.mark.parametrize("use_hedged_requests", [1, 0])
-@pytest.mark.parametrize("prefer_localhost_replica", [0, 1])
 @pytest.mark.parametrize("custom_key", ["sipHash64(key)", "key"])
 @pytest.mark.parametrize("filter_type", ["default", "range"])
 def test_parallel_replicas_custom_key_failover(
     start_cluster,
     use_hedged_requests,
-    prefer_localhost_replica,
     custom_key,
     filter_type,
 ):
-    for node in nodes:
-        node.rotate_logs()
+    filter_type = "default"
 
     cluster = "test_single_shard_multiple_replicas"
     table = "test_table"
 
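The hunk above rewrites data population so that every INSERT goes through node1, and node3 only catches up via SYSTEM SYNC REPLICA before the distributed query runs. A minimal sketch of that pattern, assuming the ClickHouseInstance.query helper from helpers.cluster and a replicated table that already exists on both nodes; the helper name populate_and_sync and its parameters are illustrative, not part of this commit:

# Sketch only: `populate_and_sync`, `writer` and `reader` are illustrative names,
# not part of this commit. Assumes ClickHouseInstance.query from helpers.cluster
# and a ReplicatedMergeTree table already created on both replicas.
def populate_and_sync(writer, reader, table_name, total_rows=4000, batch=1000):
    # Write every batch through a single replica so the data set is deterministic.
    for offset in range(0, total_rows, batch):
        writer.query(
            f"INSERT INTO {table_name} SELECT number % 4, number FROM numbers({offset}, {batch})"
        )
    # Make the second replica fetch all parts before the test queries it,
    # so both replicas return identical results.
    reader.query(f"SYSTEM SYNC REPLICA {table_name}")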
@@ -79,15 +81,15 @@ def test_parallel_replicas_custom_key_failover(
 
     expected_result = ""
     for i in range(4):
-        expected_result += f"{i}\t500\n"
+        expected_result += f"{i}\t1000\n"
 
-    log_comment = "d304e2a7-ba60-49cd-8bb7-dbb6d9b40952"
+    log_comment = uuid.uuid4()
     assert (
         node1.query(
             f"SELECT key, count() FROM {table}_d GROUP BY key ORDER BY key",
             settings={
                 "log_comment": log_comment,
-                "prefer_localhost_replica": prefer_localhost_replica,
+                "prefer_localhost_replica": 0,
                 "max_parallel_replicas": 4,
                 "parallel_replicas_custom_key": custom_key,
                 "parallel_replicas_custom_key_filter_type": filter_type,
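The core of this commit is the switch from a hardcoded log_comment to uuid.uuid4(), so that concurrent or parametrized instances of the test do not pick up each other's entries in system.query_log. A minimal sketch of the technique, again assuming the ClickHouseInstance.query helper; the function name run_tagged_query is illustrative:

import uuid

# Sketch only: `run_tagged_query` is an illustrative helper, not part of this commit.
def run_tagged_query(node, query):
    # A per-run unique comment keeps parallel/parametrized test instances from
    # matching each other's rows in system.query_log.
    log_comment = str(uuid.uuid4())
    result = node.query(query, settings={"log_comment": log_comment})
    # Query log entries are written asynchronously; flush before reading them.
    node.query("SYSTEM FLUSH LOGS")
    query_id = node.query(
        f"SELECT query_id FROM system.query_log "
        f"WHERE current_database = currentDatabase() "
        f"AND log_comment = '{log_comment}' "
        f"AND type = 'QueryFinish' AND initial_query_id = query_id"
    ).strip()
    return result, query_id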
@@ -104,26 +106,21 @@ def test_parallel_replicas_custom_key_failover(
 
     # the subqueries should be spread over available nodes
     query_id = node1.query(
-        f"select query_id from system.query_log where current_database = currentDatabase() AND log_comment = '{log_comment}' AND type = 'QueryFinish' AND initial_query_id = query_id"
+        f"SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '{log_comment}' AND type = 'QueryFinish' AND initial_query_id = query_id"
     )
     assert query_id != ""
     query_id = query_id[:-1]
 
     assert (
         node1.query(
-            f"SELECT count() > 0 FROM system.query_log WHERE initial_query_id = '{query_id}' AND type ='QueryFinish' AND query_id != initial_query_id "
+            f"SELECT 'subqueries', count() FROM clusterAllReplicas({cluster}, system.query_log) WHERE initial_query_id = '{query_id}' AND type ='QueryFinish' AND query_id != initial_query_id SETTINGS skip_unavailable_shards=1"
         )
-        == "1\n"
-    )
-    assert (
-        node3.query(
-            f"SELECT count() > 0 FROM system.query_log WHERE initial_query_id = '{query_id}' AND type ='QueryFinish' AND query_id != initial_query_id "
-        )
-        == "1\n"
+        == "subqueries\t4\n"
     )
 
-    query_count = node1.query(
-        f"SELECT h, count() > 0 FROM clusterAllReplicas({cluster}, system.query_log) WHERE initial_query_id = '{query_id}' AND type ='QueryFinish' AND query_id != initial_query_id group by hostname() as h settings skip_unavailable_shards=1"
-        ""
+    assert (
+        node1.query(
+            f"SELECT h, count() FROM clusterAllReplicas({cluster}, system.query_log) WHERE initial_query_id = '{query_id}' AND type ='QueryFinish' GROUP BY hostname() as h SETTINGS skip_unavailable_shards=1"
+        )
+        == "n1\t3\nn3\t2\n"
     )
-    assert query_count == "n1\t1\nn3\t1\n"
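The reworked assertions count the secondary (custom-key) queries across all replicas via clusterAllReplicas over system.query_log instead of checking each node's local log separately. A hedged sketch of that cross-replica check, assuming the same helpers; the helper name subqueries_per_host is illustrative, and the commit itself asserts fixed per-host counts rather than returning them:

# Sketch only: `subqueries_per_host` is an illustrative helper, not part of this commit.
def subqueries_per_host(node, cluster_name, initial_query_id):
    # clusterAllReplicas() fans the query out to every replica's system.query_log;
    # skip_unavailable_shards=1 keeps the check working when a replica is down,
    # which is exactly the failover scenario this test exercises.
    return node.query(
        f"SELECT hostname() AS h, count() "
        f"FROM clusterAllReplicas({cluster_name}, system.query_log) "
        f"WHERE initial_query_id = '{initial_query_id}' "
        f"AND type = 'QueryFinish' AND query_id != initial_query_id "
        f"GROUP BY h ORDER BY h "
        f"SETTINGS skip_unavailable_shards=1"
    )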