ClickHouse/tests/integration/test_insert_distributed_async_send/test.py
Azat Khuzhin c25d6cd624
Rename directory monitor concept into background INSERT (#55978)
* Limit log frequency for "Skipping send data over distributed table" message

After SYSTEM STOP DISTRIBUTED SENDS it will constantly print this
message.

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Rename directory monitor concept into async INSERT

Rename the following query settings (with preserving backward
compatibility, by keeping the old name as an alias):
- distributed_directory_monitor_sleep_time_ms -> distributed_async_insert_sleep_time_ms
- distributed_directory_monitor_max_sleep_time_ms -> distributed_async_insert_max_sleep_time_ms
- distributed_directory_monitor_batch_inserts -> distributed_async_insert_batch
- distributed_directory_monitor_split_batch_on_failure -> distributed_async_insert_split_batch_on_failure

Rename the following table settings (with preserving backward
compatibility, by keeping the old name as an alias):
- monitor_batch_inserts -> async_insert_batch
- monitor_split_batch_on_failure -> async_insert_split_batch_on_failure
- directory_monitor_sleep_time_ms -> async_insert_sleep_time_ms
- directory_monitor_max_sleep_time_ms -> async_insert_max_sleep_time_ms

And also update all the references:

    $ gg -e directory_monitor_ -e monitor_ tests docs | cut -d: -f1 | sort -u | xargs sed -e 's/distributed_directory_monitor_sleep_time_ms/distributed_async_insert_sleep_time_ms/g' -e 's/distributed_directory_monitor_max_sleep_time_ms/distributed_async_insert_max_sleep_time_ms/g' -e 's/distributed_directory_monitor_batch_inserts/distributed_async_insert_batch/g' -e 's/distributed_directory_monitor_split_batch_on_failure/distributed_async_insert_split_batch_on_failure/g' -e 's/monitor_batch_inserts/async_insert_batch/g' -e 's/monitor_split_batch_on_failure/async_insert_split_batch_on_failure/g' -e 's/monitor_sleep_time_ms/async_insert_sleep_time_ms/g' -e 's/monitor_max_sleep_time_ms/async_insert_max_sleep_time_ms/g' -i

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Rename async_insert for Distributed into background_insert

This will avoid ambiguity between general async INSERTs and INSERTs
into Distributed, which are indeed background, so the new term expresses
it even better.

Mostly done with:

    $ git di HEAD^ --name-only | xargs sed -i -e 's/distributed_async_insert/distributed_background_insert/g' -e 's/async_insert_batch/background_insert_batch/g' -e 's/async_insert_split_batch_on_failure/background_insert_split_batch_on_failure/g' -e 's/async_insert_sleep_time_ms/background_insert_sleep_time_ms/g' -e 's/async_insert_max_sleep_time_ms/background_insert_max_sleep_time_ms/g'

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

* Mark 02417_opentelemetry_insert_on_distributed_table as long

CI: https://s3.amazonaws.com/clickhouse-test-reports/55978/7a6abb03a0b507e29e999cb7e04f246a119c6f28/stateless_tests_flaky_check__asan_.html
Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>

---------

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
2023-11-01 15:09:39 +01:00


# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
# pylint: disable=line-too-long
# NOTES:
# - timeout should not be reduced due to bit flip of the corrupted buffer

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException

cluster = ClickHouseCluster(__file__)
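
# Four instances covering all combinations of the batching and
# split-batch-on-failure settings: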
# n1 -- distributed_background_insert_batch=1
n1 = cluster.add_instance(
    "n1",
    main_configs=["configs/remote_servers.xml"],
    user_configs=["configs/users.d/batch.xml"],
)
# n2 -- distributed_background_insert_batch=0
n2 = cluster.add_instance(
    "n2",
    main_configs=["configs/remote_servers.xml"],
    user_configs=["configs/users.d/no_batch.xml"],
)
# n3 -- distributed_background_insert_batch=1/distributed_background_insert_split_batch_on_failure=1
n3 = cluster.add_instance(
    "n3",
    main_configs=["configs/remote_servers_split.xml"],
    user_configs=[
        "configs/users.d/batch.xml",
        "configs/users.d/split.xml",
    ],
)
# n4 -- distributed_background_insert_batch=0/distributed_background_insert_split_batch_on_failure=1
n4 = cluster.add_instance(
    "n4",
    main_configs=["configs/remote_servers_split.xml"],
    user_configs=[
        "configs/users.d/no_batch.xml",
        "configs/users.d/split.xml",
    ],
)
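
# Run each test once with batching enabled (1) and once with it disabled (0);
# batch_and_split_params additionally toggles split-batch-on-failure.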
batch_params = pytest.mark.parametrize(
    "batch",
    [
        (1),
        (0),
    ],
)
batch_and_split_params = pytest.mark.parametrize(
    "batch,split",
    [
        (1, 0),
        (0, 0),
        (1, 1),
        (0, 1),
    ],
)


@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
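

# Create the underlying MergeTree table and a Distributed table over it on
# every node; distributed sends are stopped, so data is delivered only via an
# explicit SYSTEM FLUSH DISTRIBUTED.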
def create_tables(remote_cluster_name):
    for _, instance in list(cluster.instances.items()):
        instance.query(
            "CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key"
        )
        instance.query(
            f"""
            CREATE TABLE dist AS data
            Engine=Distributed(
                {remote_cluster_name},
                currentDatabase(),
                data,
                key
            )
            """
        )
        # only via SYSTEM FLUSH DISTRIBUTED
        instance.query("SYSTEM STOP DISTRIBUTED SENDS dist")


def drop_tables():
    for _, instance in list(cluster.instances.items()):
        instance.query("DROP TABLE IF EXISTS data")
        instance.query("DROP TABLE IF EXISTS dist")


# Returns the size in bytes of 2.bin (the pending batch file for the n2 shard)
def insert_data(node):
    node.query(
        "INSERT INTO dist SELECT number, randomPrintableASCII(100) FROM numbers(10000)",
        settings={
            # do not do direct INSERT, always via SYSTEM FLUSH DISTRIBUTED
            "prefer_localhost_replica": 0,
        },
    )
    path = get_path_to_dist_batch()
    size = int(node.exec_in_container(["bash", "-c", f"wc -c < {path}"]))
    assert size > 1 << 16
    return size
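

# Map the (batch, split) test parameters to the instance that is configured
# with those settings.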
def get_node(batch, split=None):
    if split:
        if batch:
            return n3
        return n4
    if batch:
        return n1
    return n2
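

# Recreate the tables and stage a 10k-row INSERT on the node under test;
# returns the size of the pending 2.bin batch file.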
def bootstrap(batch, split=None):
    drop_tables()
    create_tables("insert_distributed_async_send_cluster_two_replicas")
    return insert_data(get_node(batch, split))


def get_path_to_dist_batch(file="2.bin"):
    # There are:
    # - /var/lib/clickhouse/data/default/dist/shard1_replica1/1.bin
    # - /var/lib/clickhouse/data/default/dist/shard1_replica2/2.bin
    #
    # @return the file for the n2 shard
    return f"/var/lib/clickhouse/data/default/dist/shard1_replica2/{file}"
def check_dist_after_corruption(truncate, batch, split=None):
    node = get_node(batch, split)

    if batch:
        # In batch mode errors are ignored
        node.query("SYSTEM FLUSH DISTRIBUTED dist")
    else:
        if truncate:
            with pytest.raises(
                QueryRuntimeException, match="Cannot read all data. Bytes read:"
            ):
                node.query("SYSTEM FLUSH DISTRIBUTED dist")
        else:
            with pytest.raises(
                QueryRuntimeException,
                match="Checksum doesn't match: corrupted data. Reference:",
            ):
                node.query("SYSTEM FLUSH DISTRIBUTED dist")

    # send the pending files
    # (since we have two nodes, and the file is corrupted for only one of them)
    node.query("SYSTEM FLUSH DISTRIBUTED dist")

    # but the broken file is left behind
    broken = get_path_to_dist_batch("broken")
    node.exec_in_container(["bash", "-c", f"ls {broken}/2.bin"])

    if split:
        assert int(n3.query("SELECT count() FROM data")) == 10000
        assert int(n4.query("SELECT count() FROM data")) == 0
    else:
        assert int(n1.query("SELECT count() FROM data")) == 10000
        assert int(n2.query("SELECT count() FROM data")) == 0
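

# Baseline: without corruption a flush delivers all 10000 rows to both replicas.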
@batch_params
def test_insert_distributed_async_send_success(batch):
    bootstrap(batch)
    node = get_node(batch)
    node.query("SYSTEM FLUSH DISTRIBUTED dist")
    assert int(n1.query("SELECT count() FROM data")) == 10000
    assert int(n2.query("SELECT count() FROM data")) == 10000


@batch_and_split_params
def test_insert_distributed_async_send_truncated_1(batch, split):
    size = bootstrap(batch, split)
    path = get_path_to_dist_batch()
    node = get_node(batch, split)

    new_size = size - 10
    # we cannot use truncate, due to hardlinks
    node.exec_in_container(
        ["bash", "-c", f"mv {path} /tmp/bin && head -c {new_size} /tmp/bin > {path}"]
    )

    check_dist_after_corruption(True, batch, split)


@batch_params
def test_insert_distributed_async_send_truncated_2(batch):
    bootstrap(batch)
    path = get_path_to_dist_batch()
    node = get_node(batch)

    # we cannot use truncate, due to hardlinks
    node.exec_in_container(
        ["bash", "-c", f"mv {path} /tmp/bin && head -c 10000 /tmp/bin > {path}"]
    )

    check_dist_after_corruption(True, batch)


# The difference from test_insert_distributed_async_send_corrupted_small is
# that a small corruption is seen only on the local node.
@batch_params
def test_insert_distributed_async_send_corrupted_big(batch):
    size = bootstrap(batch)
    path = get_path_to_dist_batch()
    node = get_node(batch)

    from_original_size = size - 8192
    zeros_size = 8192
    node.exec_in_container(
        [
            "bash",
            "-c",
            f"mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > {path} && head -c {zeros_size} /dev/zero >> {path}",
        ]
    )

    check_dist_after_corruption(False, batch)


@batch_params
def test_insert_distributed_async_send_corrupted_small(batch):
    size = bootstrap(batch)
    path = get_path_to_dist_batch()
    node = get_node(batch)

    from_original_size = size - 60
    zeros_size = 60
    node.exec_in_container(
        [
            "bash",
            "-c",
            f"mv {path} /tmp/bin && head -c {from_original_size} /tmp/bin > {path} && head -c {zeros_size} /dev/zero >> {path}",
        ]
    )

    check_dist_after_corruption(False, batch)


@batch_params
def test_insert_distributed_async_send_different_header(batch):
    """
    Check INSERT INTO Distributed() with different headers in *.bin.
    If batching does not distinguish headers, the underlying table will never
    receive the data.
    """
    drop_tables()
    create_tables("insert_distributed_async_send_cluster_two_shards")

    node = get_node(batch)
    node.query(
        "INSERT INTO dist VALUES (0, 'f')",
        settings={
            "prefer_localhost_replica": 0,
        },
    )
    node.query("ALTER TABLE dist MODIFY COLUMN value UInt64")
    node.query(
        "INSERT INTO dist VALUES (2, 1)",
        settings={
            "prefer_localhost_replica": 0,
        },
    )

    n1.query(
        "ALTER TABLE data MODIFY COLUMN value UInt64",
        settings={
            "mutations_sync": 1,
        },
    )

    if batch:
        # Only one batch will be sent, and the first one is with the UInt64
        # column, so one row is inserted, while for the string ('f') an
        # exception is thrown.
        with pytest.raises(
            QueryRuntimeException,
            match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string",
        ):
            node.query("SYSTEM FLUSH DISTRIBUTED dist")
        assert int(n1.query("SELECT count() FROM data")) == 1
        # But once the underlying column is String again, implicit conversion
        # does the trick and the remaining batch is inserted.
        n1.query(
            """
            DROP TABLE data SYNC;
            CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key;
            """
        )
        node.query("SYSTEM FLUSH DISTRIBUTED dist")
        assert int(n1.query("SELECT count() FROM data")) == 1
    else:
        # The first send is with the String ('f'), so zero rows are inserted.
        with pytest.raises(
            QueryRuntimeException,
            match=r"DB::Exception: Cannot parse string 'f' as UInt64: syntax error at begin of string",
        ):
            node.query("SYSTEM FLUSH DISTRIBUTED dist")
        assert int(n1.query("SELECT count() FROM data")) == 0
        # But once the underlying column is String again, implicit conversion
        # does the trick and both rows (mixed UInt64 and String) are inserted.
        n1.query(
            """
            DROP TABLE data SYNC;
            CREATE TABLE data (key Int, value String) Engine=MergeTree() ORDER BY key;
            """
        )
        node.query("SYSTEM FLUSH DISTRIBUTED dist")
        assert int(n1.query("SELECT count() FROM data")) == 2

    assert int(n2.query("SELECT count() FROM data")) == 0