Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-27 10:02:01 +00:00

Commit c25d6cd624
* Limit log frequency for "Skipping send data over distributed table" message

  After SYSTEM STOP DISTRIBUTED SENDS it will constantly print this message.

  Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Rename directory monitor concept into async INSERT

  Rename the following query settings (preserving backward compatibility by keeping the old name as an alias):
  - distributed_directory_monitor_sleep_time_ms -> distributed_async_insert_sleep_time_ms
  - distributed_directory_monitor_max_sleep_time_ms -> distributed_async_insert_max_sleep_time_ms
  - distributed_directory_monitor_batch_inserts -> distributed_async_insert_batch
  - distributed_directory_monitor_split_batch_on_failure -> distributed_async_insert_split_batch_on_failure

  Rename the following table settings (preserving backward compatibility by keeping the old name as an alias):
  - monitor_batch_inserts -> async_insert_batch
  - monitor_split_batch_on_failure -> async_insert_split_batch_on_failure
  - directory_monitor_sleep_time_ms -> async_insert_sleep_time_ms
  - directory_monitor_max_sleep_time_ms -> async_insert_max_sleep_time_ms

  And also update all the references:

  $ gg -e directory_monitor_ -e monitor_ tests docs | cut -d: -f1 | sort -u | xargs sed \
      -e 's/distributed_directory_monitor_sleep_time_ms/distributed_async_insert_sleep_time_ms/g' \
      -e 's/distributed_directory_monitor_max_sleep_time_ms/distributed_async_insert_max_sleep_time_ms/g' \
      -e 's/distributed_directory_monitor_batch_inserts/distributed_async_insert_batch/g' \
      -e 's/distributed_directory_monitor_split_batch_on_failure/distributed_async_insert_split_batch_on_failure/g' \
      -e 's/monitor_batch_inserts/async_insert_batch/g' \
      -e 's/monitor_split_batch_on_failure/async_insert_split_batch_on_failure/g' \
      -e 's/monitor_sleep_time_ms/async_insert_sleep_time_ms/g' \
      -e 's/monitor_max_sleep_time_ms/async_insert_max_sleep_time_ms/g' \
      -i

  Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Rename async_insert for Distributed into background_insert

  This avoids ambiguity between general async INSERTs and INSERTs into Distributed tables, which are indeed performed in the background, so the new term expresses it better.

  Mostly done with:

  $ git di HEAD^ --name-only | xargs sed -i \
      -e 's/distributed_async_insert/distributed_background_insert/g' \
      -e 's/async_insert_batch/background_insert_batch/g' \
      -e 's/async_insert_split_batch_on_failure/background_insert_split_batch_on_failure/g' \
      -e 's/async_insert_sleep_time_ms/background_insert_sleep_time_ms/g' \
      -e 's/async_insert_max_sleep_time_ms/background_insert_max_sleep_time_ms/g'

  Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Mark 02417_opentelemetry_insert_on_distributed_table as long

  CI: https://s3.amazonaws.com/clickhouse-test-reports/55978/7a6abb03a0b507e29e999cb7e04f246a119c6f28/stateless_tests_flaky_check__asan_.html

  Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

---------

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
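For illustration, a minimal sketch (not from the commit) of how the renamed query-level settings could be exercised from an integration test such as the one below. "n1" and "dist" are assumed to be set up as in that test, and per the commit message the old distributed_directory_monitor_* names remain usable as aliases:

    # Hypothetical usage sketch: pass the renamed background-insert settings
    # on an INSERT into a Distributed table, then flush the background queue.
    n1.query(
        "INSERT INTO dist SELECT * FROM numbers(10)",
        settings={
            "distributed_background_insert_batch": 1,
            "distributed_background_insert_sleep_time_ms": 100,
        },
    )
    n1.query("SYSTEM FLUSH DISTRIBUTED dist")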
98 lines
2.7 KiB
Python
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
# pylint: disable=line-too-long

import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

n1 = cluster.add_instance("n1", main_configs=["configs/remote_servers.xml"])
n2 = cluster.add_instance("n2", main_configs=["configs/remote_servers.xml"])

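# "q" is the expected row multiplier on the replica that the INSERT does not
# target directly: with internal_replication the Distributed table sends each
# block to a single replica (q=0); without it the block is written to both
# replicas (q=1).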
params = pytest.mark.parametrize(
    "cluster,q",
    [
        ("internal_replication", 0),
        ("no_internal_replication", 1),
    ],
)

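# Start the cluster once for the whole module and shut it down after all tests
# in this module have finished.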
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

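# Recreate the Memory table `data` on both nodes and the Distributed table
# `dist` on n1, pointing at the given cluster with a random sharding key.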
def create_tables(cluster):
    n1.query("DROP TABLE IF EXISTS data")
    n2.query("DROP TABLE IF EXISTS data")
    n1.query("DROP TABLE IF EXISTS dist")

    n1.query("CREATE TABLE data (key Int) Engine=Memory()")
    n2.query("CREATE TABLE data (key Int) Engine=Memory()")
    n1.query(
        """
        CREATE TABLE dist AS data
        Engine=Distributed(
            {cluster},
            currentDatabase(),
            data,
            rand()
        )
        """.format(
            cluster=cluster
        )
    )

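# Insert 10 rows through the Distributed table with the given query settings,
# then flush any pending background sends to the remote replicas.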
def insert_data(cluster, **settings):
    create_tables(cluster)
    n1.query("INSERT INTO dist SELECT * FROM numbers(10)", settings=settings)
    n1.query("SYSTEM FLUSH DISTRIBUTED dist")

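# Default settings: prefer_localhost_replica=1, so the INSERT lands on the
# local replica n1; n2 receives a copy only when internal_replication is
# disabled.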
@params
def test_prefer_localhost_replica_1(cluster, q):
    insert_data(cluster)
    assert int(n1.query("SELECT count() FROM data")) == 10
    assert int(n2.query("SELECT count() FROM data")) == 10 * q

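# Same as above, but with in_order load balancing; prefer_localhost_replica=1
# still wins, so the rows stay on n1.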
@params
def test_prefer_localhost_replica_1_load_balancing_in_order(cluster, q):
    insert_data(cluster, load_balancing="in_order")
    assert int(n1.query("SELECT count() FROM data")) == 10
    assert int(n2.query("SELECT count() FROM data")) == 10 * q

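# With prefer_localhost_replica=0 and nearest_hostname load balancing, n1
# still routes the INSERT to itself, since its own hostname is the closest
# match.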
@params
def test_prefer_localhost_replica_0_load_balancing_nearest_hostname(cluster, q):
    insert_data(cluster, load_balancing="nearest_hostname", prefer_localhost_replica=0)
    assert int(n1.query("SELECT count() FROM data")) == 10
    assert int(n2.query("SELECT count() FROM data")) == 10 * q

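# With prefer_localhost_replica=0 and in_order load balancing the INSERT is
# routed to n2; n1 receives the rows only when internal_replication is
# disabled (q=1).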
@params
def test_prefer_localhost_replica_0_load_balancing_in_order(cluster, q):
    insert_data(cluster, load_balancing="in_order", prefer_localhost_replica=0)
    assert int(n1.query("SELECT count() FROM data")) == 10 * q
    assert int(n2.query("SELECT count() FROM data")) == 10

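# Same routing as the previous test, but distributed_foreground_insert=1 sends
# the data to the remote replica synchronously during the INSERT instead of
# through the background queue.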
@params
def test_prefer_localhost_replica_0_load_balancing_in_order_sync(cluster, q):
    insert_data(
        cluster,
        load_balancing="in_order",
        prefer_localhost_replica=0,
        distributed_foreground_insert=1,
    )
    assert int(n1.query("SELECT count() FROM data")) == 10 * q
    assert int(n2.query("SELECT count() FROM data")) == 10