# ClickHouse/tests/integration/test_merge_tree_s3/test.py

import logging
import os
import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.utility import generate_values, replace_config, SafeThread
from helpers.wait_for_helpers import wait_for_delete_inactive_parts
from helpers.wait_for_helpers import wait_for_delete_empty_parts

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
"node",
main_configs=[
"configs/config.xml",
"configs/config.d/storage_conf.xml",
"configs/config.d/bg_processing_pool_conf.xml",
],
stay_alive=True,
with_minio=True,
)
cluster.add_instance(
"node_with_limited_disk",
main_configs=[
"configs/config.d/storage_conf.xml",
"configs/config.d/bg_processing_pool_conf.xml",
],
with_minio=True,
tmpfs=[
"/jbod1:size=2M",
],
)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()


FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files
# A wide part stores each of the table's 3 columns separately (data + marks),
# plus part-level files (primary key, skip index, checksums, columns list,
# count, partition minmax, etc.).
FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
# A compact part keeps all columns in a single data file, so its overhead is flat.
FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1
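
# Example of how these constants combine in the assertions below: after two
# INSERTs that each create one wide part, the bucket is expected to hold
# FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 objects, and dropping one
# partition should subtract one part's worth of objects.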


def create_table(node, table_name, **additional_settings):
settings = {
"storage_policy": "s3",
"old_parts_lifetime": 0,
"index_granularity": 512,
}
settings.update(additional_settings)
create_table_statement = f"""
CREATE TABLE {table_name} (
dt Date,
id Int64,
data String,
INDEX min_max (id) TYPE minmax GRANULARITY 3
) ENGINE=MergeTree()
PARTITION BY dt
ORDER BY (dt, id)
SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}"""
node.query(f"DROP TABLE IF EXISTS {table_name}")
node.query(create_table_statement)
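
# Usage sketch: every keyword argument is forwarded verbatim into the SETTINGS
# clause, e.g. create_table(node, "s3_test", min_rows_for_wide_part=0) produces
# "SETTINGS storage_policy='s3', ..., min_rows_for_wide_part=0".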


def run_s3_mocks(cluster):
logging.info("Starting s3 mocks")
mocks = (
("unstable_proxy.py", "resolver", "8081"),
("no_delete_objects.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
cluster.copy_file_to_container(
container_id,
os.path.join(current_dir, "s3_mocks", mock_filename),
mock_filename,
)
cluster.exec_in_container(
container_id, ["python", mock_filename, port], detach=True
)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
num_attempts = 100
for attempt in range(num_attempts):
ping_response = cluster.exec_in_container(
cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"],
nothrow=True,
)
if ping_response != "OK":
if attempt == num_attempts - 1:
assert (
ping_response == "OK"
), f'Expected "OK", but got "{ping_response}"'
else:
time.sleep(1)
else:
logging.debug(
f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}"
)
break
logging.info("S3 mocks started")


def wait_for_delete_s3_objects(cluster, expected, timeout=30):
minio = cluster.minio_client
while timeout > 0:
if (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== expected
):
return
timeout -= 1
time.sleep(1)
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== expected
)
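
# Usage sketch: waiting until only the table-level overhead objects remain,
# e.g. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD), is how the tests
# below confirm that all parts were actually removed from the bucket.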


@pytest.fixture(autouse=True)
def drop_table(cluster, node_name):
yield
node = cluster.instances[node_name]
minio = cluster.minio_client
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
try:
wait_for_delete_s3_objects(cluster, 0)
finally:
# Remove extra objects to prevent tests cascade failing
for obj in list(
minio.list_objects(cluster.minio_bucket, "data/", recursive=True)
):
minio.remove_object(cluster.minio_bucket, obj.object_name)


@pytest.mark.parametrize(
"min_rows_for_wide_part,files_per_part,node_name",
[
(0, FILES_OVERHEAD_PER_PART_WIDE, "node"),
(8192, FILES_OVERHEAD_PER_PART_COMPACT, "node"),
],
)
def test_simple_insert_select(
cluster, min_rows_for_wide_part, files_per_part, node_name
):
node = cluster.instances[node_name]
create_table(node, "s3_test", min_rows_for_wide_part=min_rows_for_wide_part)
minio = cluster.minio_client
values1 = generate_values("2020-01-03", 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values1))
    assert node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + files_per_part
)
values2 = generate_values("2020-01-04", 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values2))
assert (
node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values")
== values1 + "," + values2
)
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + files_per_part * 2
)
assert (
        node.query("SELECT count(*) FROM s3_test WHERE id = 1 FORMAT Values") == "(2)"
)


@pytest.mark.parametrize("merge_vertical,node_name", [(True, "node"), (False, "node")])
def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
settings = {}
    # Thresholds of 0 force the vertical merge algorithm for every merge.
    if merge_vertical:
        settings["vertical_merge_algorithm_min_rows_to_activate"] = 0
        settings["vertical_merge_algorithm_min_columns_to_activate"] = 0
node = cluster.instances[node_name]
create_table(node, "s3_test", **settings)
minio = cluster.minio_client
node.query("SYSTEM STOP MERGES s3_test")
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 1024))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 2048))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 1024, -1))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 2048, -1))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1))
)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert (
node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
)
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD
)
node.query("SYSTEM START MERGES s3_test")
# Wait for merges and old parts deletion
    for attempt in range(10):
parts_count = node.query(
"SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' and active = 1 FORMAT Values"
)
if parts_count == "(1)":
break
if attempt == 9:
assert parts_count == "(1)"
time.sleep(1)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert (
node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
)
wait_for_delete_s3_objects(
cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD, timeout=45
)


@pytest.mark.parametrize("node_name", ["node"])
def test_alter_table_columns(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1))
)
node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1")
# To ensure parts have merged
node.query("OPTIMIZE TABLE s3_test")
assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
assert (
node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values")
== "(4096)"
)
wait_for_delete_s3_objects(
cluster,
FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN,
)
node.query(
"ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2}
)
assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
    # One extra object: the file recording the mutation.
wait_for_delete_s3_objects(
cluster,
FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1,
)
node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
    # Two extra objects: one file per executed mutation.
wait_for_delete_s3_objects(
cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2
)


@pytest.mark.parametrize("node_name", ["node"])
def test_attach_detach_partition(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-03'")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
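
    # DETACH keeps the part's files on the S3 disk (only moving it to
    # detached/), so the object count asserted below must stay unchanged.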
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 1
)
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-04'")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert (
        len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 1
)
node.query(
"ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'",
settings={"allow_drop_detached": 1},
)
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 0
)


@pytest.mark.parametrize("node_name", ["node"])
def test_move_partition_to_another_disk(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
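
    # Moving the partition to the local 'hdd' disk should remove its objects
    # from S3; moving it back to the 's3' disk should upload them again.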
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
)
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)


@pytest.mark.parametrize("node_name", ["node"])
def test_table_manipulations(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
node.query("RENAME TABLE s3_test TO s3_renamed")
assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
node.query("RENAME TABLE s3_renamed TO s3_test")
assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)"
node.query("DETACH TABLE s3_test")
node.query("ATTACH TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
node.query("TRUNCATE TABLE s3_test")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD
)


@pytest.mark.parametrize("node_name", ["node"])
def test_move_replace_partition_to_another_table(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-05", 4096, -1))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-06", 4096, -1))
)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
)
create_table(node, "s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-05' TO TABLE s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Number of objects in S3 should be unchanged.
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
)
# Add new partitions to source table, but with different values and replace them from copied table.
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096, -1))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-05", 4096))
)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6
)
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone")
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-05' FROM s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Wait for outdated partitions deletion.
wait_for_delete_s3_objects(
cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
)
node.query("DROP TABLE s3_clone NO DELAY")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
# Data should remain in S3
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
)
node.query("ALTER TABLE s3_test FREEZE")
    # The number of S3 objects should be unchanged.
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
)
node.query("DROP TABLE s3_test NO DELAY")
# Backup data should remain in S3.
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4)
for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)):
minio.remove_object(cluster.minio_bucket, obj.object_name)


@pytest.mark.parametrize("node_name", ["node"])
def test_freeze_unfreeze(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup1'")
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup2'")
node.query("TRUNCATE TABLE s3_test")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
# Unfreeze single partition from backup1.
node.query(
"ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'"
)
# Unfreeze all partitions from backup2.
node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'")
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD)
# Data should be removed from S3.
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD
)


@pytest.mark.parametrize("node_name", ["node"])
def test_freeze_system_unfreeze(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
create_table(node, "s3_test_removed")
minio = cluster.minio_client
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096))
)
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup3'")
node.query("ALTER TABLE s3_test_removed FREEZE WITH NAME 'backup3'")
node.query("TRUNCATE TABLE s3_test")
wait_for_delete_empty_parts(node, "s3_test")
wait_for_delete_inactive_parts(node, "s3_test")
node.query("DROP TABLE s3_test_removed NO DELAY")
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
)
# Unfreeze all data from backup3.
node.query("SYSTEM UNFREEZE WITH NAME 'backup3'")
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD)
# Data should be removed from S3.
assert (
len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)))
== FILES_OVERHEAD
)


@pytest.mark.parametrize("node_name", ["node"])
def test_s3_disk_apply_new_settings(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test")
config_path = os.path.join(
SCRIPT_DIR,
"./{}/node/configs/config.d/storage_conf.xml".format(
cluster.instances_dir_name
),
)

    def get_s3_requests():
node.query("SYSTEM FLUSH LOGS")
return int(
node.query(
"SELECT value FROM system.events WHERE event='S3WriteRequestsCount'"
)
)
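
    # S3WriteRequestsCount in system.events is cumulative, so every
    # measurement below takes a delta around a single INSERT.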
s3_requests_before = get_s3_requests()
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096))
)
s3_requests_to_write_partition = get_s3_requests() - s3_requests_before
# Force multi-part upload mode.
replace_config(
config_path,
"<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>",
"<s3_max_single_part_upload_size>0</s3_max_single_part_upload_size>",
)
node.query("SYSTEM RELOAD CONFIG")
s3_requests_before = get_s3_requests()
node.query(
"INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096, -1))
)
    # Multi-part upload needs 3 write requests per object (create the upload,
    # upload the part, complete it), so there should be 3x more S3 requests.
assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3


@pytest.mark.parametrize("node_name", ["node"])
def test_s3_no_delete_objects(cluster, node_name):
node = cluster.instances[node_name]
create_table(
node, "s3_test_no_delete_objects", storage_policy="no_delete_objects_s3"
)
node.query("DROP TABLE s3_test_no_delete_objects SYNC")


@pytest.mark.parametrize("node_name", ["node"])
def test_s3_disk_reads_on_unstable_connection(cluster, node_name):
node = cluster.instances[node_name]
create_table(node, "s3_test", storage_policy="unstable_s3")
node.query(
"INSERT INTO s3_test SELECT today(), *, toString(*) FROM system.numbers LIMIT 9000000"
)
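
    # sum(id) over 0..8999999 is 9000000 * 8999999 / 2 = 40499995500000; every
    # one of the 30 reads below must return it despite the unstable proxy.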
for i in range(30):
print(f"Read sequence {i}")
assert node.query("SELECT sum(id) FROM s3_test").splitlines() == [
"40499995500000"
]


@pytest.mark.parametrize("node_name", ["node"])
def test_lazy_seek_optimization_for_async_read(cluster, node_name):
node = cluster.instances[node_name]
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node.query(
"CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';"
)
node.query(
"INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000000"
)
node.query("SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10")
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
minio = cluster.minio_client
for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)):
minio.remove_object(cluster.minio_bucket, obj.object_name)


@pytest.mark.parametrize("node_name", ["node_with_limited_disk"])
def test_cache_with_full_disk_space(cluster, node_name):
node = cluster.instances[node_name]
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node.query(
"CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_with_cache_and_jbod';"
)
node.query(
"INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 500000"
)
node.query(
"SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value FORMAT Null"
)
assert node.contains_in_log(
"Insert into cache is skipped due to insufficient disk space"
)
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")


@pytest.mark.parametrize("node_name", ["node"])
def test_store_cleanup_disk_s3(cluster, node_name):
node = cluster.instances[node_name]
node.query("DROP TABLE IF EXISTS s3_test SYNC")
node.query(
"CREATE TABLE s3_test UUID '00000000-1000-4000-8000-000000000001' (n UInt64) Engine=MergeTree() ORDER BY n SETTINGS storage_policy='s3';"
)
node.query("INSERT INTO s3_test SELECT 1")
node.stop_clickhouse(kill=True)
path_to_data = "/var/lib/clickhouse/"
node.exec_in_container(["rm", f"{path_to_data}/metadata/default/s3_test.sql"])
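    # With the .sql metadata file gone, the table's directory under store/ is
    # orphaned and should be garbage-collected during the next startup.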
node.start_clickhouse()
node.wait_for_log_line(
"Removing unused directory", timeout=90, look_behind_lines=1000
)
node.wait_for_log_line("directories from store")
node.query(
"CREATE TABLE s3_test UUID '00000000-1000-4000-8000-000000000001' (n UInt64) Engine=MergeTree() ORDER BY n SETTINGS storage_policy='s3';"
)
node.query("INSERT INTO s3_test SELECT 1")


@pytest.mark.parametrize("node_name", ["node"])
def test_cache_setting_compatibility(cluster, node_name):
node = cluster.instances[node_name]
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node.query(
"CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r', compress_marks=false, compress_primary_key=false;"
)
node.query(
"INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 500"
)
    node.query("SYSTEM DROP FILESYSTEM CACHE")
result = node.query(
"SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'"
)
assert int(result) == 0
node.query("SELECT * FROM s3_test")
result = node.query(
"SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'"
)
assert int(result) > 0
config_path = os.path.join(
SCRIPT_DIR,
f"./{cluster.instances_dir_name}/node/configs/config.d/storage_conf.xml",
)
replace_config(
config_path,
"<do_not_evict_index_and_mark_files>1</do_not_evict_index_and_mark_files>",
"<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>",
)
result = node.query("DESCRIBE FILESYSTEM CACHE 's3_cache_r'")
assert result.strip().endswith("1")
node.restart_clickhouse()
result = node.query("DESCRIBE FILESYSTEM CACHE 's3_cache_r'")
assert result.strip().endswith("0")
result = node.query(
"SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'"
)
assert int(result) > 0
node.query("SELECT * FROM s3_test FORMAT Null")
assert not node.contains_in_log("No such file or directory: Cache info:")
replace_config(
config_path,
"<do_not_evict_index_and_mark_files>0</do_not_evict_index_and_mark_files>",
"<do_not_evict_index_and_mark_files>1</do_not_evict_index_and_mark_files>",
)
result = node.query(
"SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'"
)
assert int(result) > 0
node.restart_clickhouse()
result = node.query("DESCRIBE FILESYSTEM CACHE 's3_cache_r'")
assert result.strip().endswith("1")
node.query("SELECT * FROM s3_test FORMAT Null")
assert not node.contains_in_log("No such file or directory: Cache info:")