import inspect
import random
import threading
import time
from multiprocessing.dummy import Pool

import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry

# FIXME: Each sleep(1) is a time bomb: not only does it cause false positives,
# it also makes the test unreliable (i.e. assertions may be wrong due to timing issues).
# It seems some SYSTEM query should be added to wait for those things instead of sleep().

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance(
    "node1",
    main_configs=[
        "configs/logs_config.xml",
        "configs/config.d/instant_moves.xml",
        "configs/config.d/storage_configuration.xml",
        "configs/config.d/cluster.xml",
    ],
    with_zookeeper=True,
    tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"],
    macros={"shard": 0, "replica": 1},
    stay_alive=True,
)

node2 = cluster.add_instance(
    "node2",
    main_configs=[
        "configs/logs_config.xml",
        "configs/config.d/instant_moves.xml",
        "configs/config.d/storage_configuration.xml",
        "configs/config.d/cluster.xml",
    ],
    with_zookeeper=True,
    tmpfs=["/jbod1:size=40M", "/jbod2:size=40M", "/external:size=200M"],
    macros={"shard": 0, "replica": 2},
)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()


def get_used_disks_for_table(node, table_name, partition=None):
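    """Return the list of disks that hold active parts of `table_name` on `node`,
    ordered by part modification time (optionally filtered to one partition)."""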
    if partition is None:
        suffix = ""
    else:
        suffix = "and partition='{}'".format(partition)
    return (
        node.query(
            """
            SELECT disk_name
            FROM system.parts
            WHERE table == '{name}' AND active=1 {suffix}
            ORDER BY modification_time
            """.format(
                name=table_name, suffix=suffix
            )
        )
        .strip()
        .split("\n")
    )


def check_used_disks_with_retry(node, table_name, expected_disks, retries=1):
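    """Poll the set of used disks until it is a subset of `expected_disks`,
    retrying up to `retries` times with a 0.5 s pause; return True on success."""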
    for _ in range(retries):
        used_disks = get_used_disks_for_table(node, table_name)
        if set(used_disks).issubset(expected_disks):
            return True
        time.sleep(0.5)
    return False


# Use a unique table name for the flaky-test checker, which runs tests multiple times.
def unique_table_name(base_name):
    return f"{base_name}_{int(time.time())}"


def wait_parts_mover(node, table, *args, **kwargs):
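    """Block until the MergeTreePartsMover log line "Removed part from old location"
    appears for `table`; extra args are forwarded to assert_logs_contain_with_retry
    (the tests below call this e.g. as wait_parts_mover(node1, name, retry_count=40))."""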
    # wait for MergeTreePartsMover
    assert_logs_contain_with_retry(
        node, f"default.{table}.*Removed part from old location", *args, **kwargs
    )


@pytest.mark.parametrize(
    "name,engine,alter",
    [
        pytest.param(
            "mt_test_rule_with_invalid_destination", "MergeTree()", 0, id="case0"
        ),
        pytest.param(
            "replicated_mt_test_rule_with_invalid_destination",
            "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",
            0,
            id="case1",
        ),
        pytest.param(
            "mt_test_rule_with_invalid_destination", "MergeTree()", 1, id="case2"
        ),
        pytest.param(
            "replicated_mt_test_rule_with_invalid_destination",
            "ReplicatedMergeTree('/clickhouse/replicated_test_rule_with_invalid_destination', '1')",
            1,
            id="case3",
        ),
    ],
)
def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
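    """Check that CREATE TABLE and ALTER ... MODIFY TTL are rejected when the TTL
    move rule points to a disk or volume that is unknown or absent from the
    table's storage policy."""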
    name = unique_table_name(name)

    try:

        def get_command(x, policy):
            x = x or ""
            if alter and x:
                return """
                    ALTER TABLE {name} MODIFY TTL {expression}
                """.format(
                    expression=x, name=name
                )
            else:
                return """
                    CREATE TABLE {name} (
                        s1 String,
                        d1 DateTime
                    ) ENGINE = {engine}
                    ORDER BY tuple()
                    {expression}
                    SETTINGS storage_policy='{policy}'
                """.format(
                    expression=x, name=name, engine=engine, policy=policy
                )

        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            node1.query(
                get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external")
            )

        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))

        if alter:
            node1.query(get_command(None, "small_jbod_with_external"))

        with pytest.raises(QueryRuntimeException):
            node1.query(
                get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external")
            )

        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))

        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))

        if alter:
            node1.query(get_command(None, "only_jbod2"))

        with pytest.raises(QueryRuntimeException):
            node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_inserts_to_disk_do_not_work",
            "MergeTree()",
            0,
            id="mt_test_inserts_to_disk_do_not_work",
        ),
        pytest.param(
            "replicated_mt_test_inserts_to_disk_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_do_not_work', '1')",
            0,
            id="replicated_mt_test_inserts_to_disk_do_not_work",
        ),
        pytest.param(
            "mt_test_inserts_to_disk_work",
            "MergeTree()",
            1,
            id="mt_test_inserts_to_disk_work_1",
        ),
        pytest.param(
            "replicated_mt_test_inserts_to_disk_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_disk_work', '1')",
            1,
            id="replicated_mt_test_inserts_to_disk_work_1",
        ),
    ],
)
def test_inserts_to_disk_work(started_cluster, name, engine, positive):
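    """Insert rows whose TTL is already expired (positive case) or far in the future
    (negative case) and check whether the parts land directly on the 'external'
    disk or stay on jbod1."""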
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(
                        time.time() - 1 if i > 0 or positive else time.time() + 300
                    ),
                )
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        try:
            node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
        except:
            pass


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param(
            "mt_test_moves_work_after_storage_policy_change",
            "MergeTree()",
            id="mt_test_moves_work_after_storage_policy_change",
        ),
        pytest.param(
            "replicated_mt_test_moves_work_after_storage_policy_change",
            "ReplicatedMergeTree('/clickhouse/test_moves_work_after_storage_policy_change', '1')",
            id="replicated_mt_test_moves_work_after_storage_policy_change",
        ),
    ],
)
def test_moves_work_after_storage_policy_change(started_cluster, name, engine):
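    """Create a table without a TTL, then attach a storage policy and a move TTL
    via ALTER, and check that the background mover relocates the parts to the
    'external' disk."""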
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            """.format(
                name=name, engine=engine
            )
        )

        node1.query(
            """ALTER TABLE {name} MODIFY SETTING storage_policy='default_with_small_jbod_with_external'""".format(
                name=name
            )
        )

        # Second expression is preferred because d1 > now()-3600.
        node1.query(
            """ALTER TABLE {name} MODIFY TTL now()-3600 TO DISK 'jbod1', d1 TO DISK 'external'""".format(
                name=name
            ),
            settings={"allow_suspicious_ttl_expressions": 1},
        )

        wait_expire_1 = 12
        wait_expire_2 = 4
        time_1 = time.time() + wait_expire_1

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1))
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        wait_parts_mover(node1, name, retry_count=40)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_moves_to_disk_do_not_work",
            "MergeTree()",
            0,
            id="mt_test_moves_to_disk_do_not_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_to_disk_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_do_not_work', '1')",
            0,
            id="replicated_mt_test_moves_to_disk_do_not_work",
        ),
        pytest.param(
            "mt_test_moves_to_disk_work",
            "MergeTree()",
            1,
            id="mt_test_moves_to_disk_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_to_disk_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_work', '1')",
            1,
            id="replicated_mt_test_moves_to_disk_work",
        ),
    ],
)
def test_moves_to_disk_work(started_cluster, name, engine, positive):
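    """Insert rows whose TTL expires after a short delay and verify that the
    background mover relocates the parts to the 'external' disk (positive case)
    or leaves them on jbod1 while the TTL has not expired (negative case)."""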
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        wait_expire_1 = 12
        wait_expire_2 = 20
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        wait_expire_1_thread = threading.Thread(
            target=time.sleep, args=(wait_expire_1,)
        )
        wait_expire_1_thread.start()

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time_1 if i > 0 or positive else time_2),
                )
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param(
            "mt_test_moves_to_volume_work",
            "MergeTree()",
            id="mt_test_moves_to_volume_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_to_volume_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_volume_work', '1')",
            id="replicated_mt_test_moves_to_volume_work",
        ),
    ],
)
def test_moves_to_volume_work(started_cluster, name, engine):
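    """Insert into two partitions spread over jbod1/jbod2 and verify that the
    mover relocates all parts to the 'external' volume once the TTL expires."""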
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1

        for p in range(2):
            data = []  # 10MB in total
            for i in range(5):
                data.append(
                    (
                        str(p),
                        "randomPrintableASCII(1024*1024)",
                        "toDateTime({})".format(time_1),
                    )
                )

            node1.query(
                "INSERT INTO {} (p1, s1, d1) VALUES {}".format(
                    name, ",".join(["(" + ",".join(x) + ")" for x in data])
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1", "jbod2"}

        wait_parts_mover(node1, name, retry_count=40)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_inserts_to_volume_do_not_work",
            "MergeTree()",
            0,
            id="mt_test_inserts_to_volume_do_not_work",
        ),
        pytest.param(
            "replicated_mt_test_inserts_to_volume_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_do_not_work', '1')",
            0,
            id="replicated_mt_test_inserts_to_volume_do_not_work",
        ),
        pytest.param(
            "mt_test_inserts_to_volume_work",
            "MergeTree()",
            1,
            id="mt_test_inserts_to_volume_work",
        ),
        pytest.param(
            "replicated_mt_test_inserts_to_volume_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_inserts_to_volume_work', '1')",
            1,
            id="replicated_mt_test_inserts_to_volume_work",
        ),
    ],
)
def test_inserts_to_volume_work(started_cluster, name, engine, positive):
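    """With background moves stopped, insert rows into two partitions and check
    that already-expired rows are written directly to the 'external' volume
    (positive case) or stay on jbod1 (negative case)."""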
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 TO VOLUME 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        node1.query("SYSTEM STOP MOVES {name}".format(name=name))

        for p in range(2):
            data = []  # 20MB in total
            for i in range(10):
                data.append(
                    (
                        str(p),
                        "randomPrintableASCII(1024*1024)",
                        "toDateTime({})".format(
                            time.time() - 1 if i > 0 or positive else time.time() + 300
                        ),
                    )
                )

            node1.query(
                "INSERT INTO {} (p1, s1, d1) VALUES {}".format(
                    name, ",".join(["(" + ",".join(x) + ")" for x in data])
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "20"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param(
            "mt_test_moves_to_disk_eventually_work",
            "MergeTree()",
            id="mt_test_moves_to_disk_eventually_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_to_disk_eventually_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_to_disk_eventually_work', '1')",
            id="replicated_mt_test_moves_to_disk_eventually_work",
        ),
    ],
)
def test_moves_to_disk_eventually_work(started_cluster, name, engine):
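    """Fill jbod2 with a temporary table so the TTL move has no room, then drop
    the temporary table and verify the parts eventually move from jbod1 to jbod2."""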
    name = unique_table_name(name)

    try:
        name_temp = name + "_temp"

        node1.query(
            """
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
            """.format(
                name=name_temp
            )
        )

        data = []  # 35MB in total
        for i in range(35):
            data.append("randomPrintableASCII(1024*1024)")

        node1.query(
            "INSERT INTO {} VALUES {}".format(
                name_temp, ",".join(["(" + x + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
            """.format(
                name=name, engine=engine
            )
        )

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time.time() - 1),
                )
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        node1.query("DROP TABLE {} SYNC".format(name_temp))

        wait_parts_mover(node1, name)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod2"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


def test_replicated_download_ttl_info(started_cluster):
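    """Verify that TTL info travels with replicated parts: a part inserted on
    node2 with an already-expired move TTL should end up on the 'external' disk
    on node1 as well, even though moves are stopped there."""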
    name = unique_table_name("test_replicated_ttl_info")
    engine = "ReplicatedMergeTree('/clickhouse/test_replicated_download_ttl_info', '{replica}')"
    try:
        for i, node in enumerate((node1, node2), start=1):
            node.query(
                """
                CREATE TABLE {name} (
                    s1 String,
                    d1 DateTime
                ) ENGINE = {engine}
                ORDER BY tuple()
                TTL d1 TO DISK 'external'
                SETTINGS storage_policy='small_jbod_with_external'
                """.format(
                    name=name, engine=engine
                )
            )

        node1.query("SYSTEM STOP MOVES {}".format(name))

        node2.query(
            "INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII(1024*1024), toDateTime({}))".format(
                name, time.time() - 100
            )
        )

        assert set(get_used_disks_for_table(node2, name)) == {"external"}

        time.sleep(1)

        assert node1.query("SELECT count() FROM {}".format(name)).splitlines() == ["1"]
        assert set(get_used_disks_for_table(node1, name)) == {"external"}

    finally:
        for node in (node1, node2):
            try:
                node.query("DROP TABLE IF EXISTS {} SYNC".format(name))
            except:
                continue


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_merges_to_disk_do_not_work",
            "MergeTree()",
            0,
            id="mt_test_merges_to_disk_do_not_work",
        ),
        pytest.param(
            "replicated_mt_test_merges_to_disk_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_do_not_work', '1')",
            0,
            id="replicated_mt_test_merges_to_disk_do_not_work",
        ),
        pytest.param(
            "mt_test_merges_to_disk_work",
            "MergeTree()",
            1,
            id="mt_test_merges_to_disk_work",
        ),
        pytest.param(
            "replicated_mt_test_merges_to_disk_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_merges_to_disk_work', '1')",
            1,
            id="replicated_mt_test_merges_to_disk_work",
        ),
    ],
)
def test_merges_to_disk_work(started_cluster, name, engine, positive):
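    """With merges and moves stopped, insert two parts, then re-enable merges and
    run OPTIMIZE: the merged part should land on 'external' once the TTL has
    expired (positive case) or stay on jbod1 (negative case)."""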
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        node1.query("SYSTEM STOP MERGES {}".format(name))
        node1.query("SYSTEM STOP MOVES {}".format(name))

        wait_expire_1 = 16
        wait_expire_2 = 20
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        wait_expire_1_thread = threading.Thread(
            target=time.sleep, args=(wait_expire_1,)
        )
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 16MB in total
            for i in range(8):
                data.append(
                    (
                        "randomPrintableASCII(1024*1024)",
                        "toDateTime({})".format(
                            time_1 if i > 0 or positive else time_2
                        ),
                    )
                )

            node1.query(
                "INSERT INTO {} (s1, d1) VALUES {}".format(
                    name, ",".join(["(" + ",".join(x) + ")" for x in data])
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert (
            "2"
            == node1.query(
                "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(
                    name
                )
            ).strip()
        )

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        node1.query("SYSTEM START MERGES {}".format(name))
        node1.query("OPTIMIZE TABLE {}".format(name))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}
        assert (
            "1"
            == node1.query(
                "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(
                    name
                )
            ).strip()
        )

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "16"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param(
            "mt_test_merges_with_full_disk_work",
            "MergeTree()",
            id="mt_test_merges_with_full_disk_work",
        ),
        pytest.param(
            "replicated_mt_test_merges_with_full_disk_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_merges_with_full_disk_work', '1')",
            id="replicated_mt_test_merges_with_full_disk_work",
        ),
    ],
)
def test_merges_with_full_disk_work(started_cluster, name, engine):
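    """Fill jbod2 so the TTL destination has no free space, then merge: the merged
    part must stay on jbod1 even though the TTL rule says it should go to jbod2."""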
    name = unique_table_name(name)

    try:
        name_temp = name + "_temp"

        node1.query(
            """
            CREATE TABLE {name} (
                s1 String
            ) ENGINE = MergeTree()
            ORDER BY tuple()
            SETTINGS storage_policy='only_jbod2'
            """.format(
                name=name_temp
            )
        )

        data = []  # 35MB in total
        for i in range(35):
            data.append("randomPrintableASCII(1024*1024)")

        node1.query(
            "INSERT INTO {} VALUES {}".format(
                name_temp, ",".join(["(" + x + ")" for x in data])
            )
        )
        used_disks = get_used_disks_for_table(node1, name_temp)
        assert set(used_disks) == {"jbod2"}

        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'jbod2'
            SETTINGS storage_policy='jbod1_with_jbod2'
            """.format(
                name=name, engine=engine
            )
        )

        wait_expire_1 = 10
        time_1 = time.time() + wait_expire_1

        wait_expire_1_thread = threading.Thread(
            target=time.sleep, args=(wait_expire_1,)
        )
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 12MB in total
            for i in range(6):
                data.append(
                    ("randomPrintableASCII(1024*1024)", "toDateTime({})".format(time_1))
                )  # 1MB row
            node1.query(
                "INSERT INTO {} (s1, d1) VALUES {}".format(
                    name, ",".join(["(" + ",".join(x) + ")" for x in data])
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert (
            "2"
            == node1.query(
                "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(
                    name
                )
            ).strip()
        )

        wait_expire_1_thread.join()

        node1.query("OPTIMIZE TABLE {}".format(name))
        time.sleep(1)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}  # Merged to the same disk against the rule.
        assert (
            "1"
            == node1.query(
                "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(
                    name
                )
            ).strip()
        )

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "12"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name_temp))
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_moves_after_merges_do_not_work",
            "MergeTree()",
            0,
            id="mt_test_moves_after_merges_do_not_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_merges_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_do_not_work', '1')",
            0,
            id="replicated_mt_test_moves_after_merges_do_not_work",
        ),
        pytest.param(
            "mt_test_moves_after_merges_work",
            "MergeTree()",
            1,
            id="mt_test_moves_after_merges_work",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_merges_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_merges_work', '1')",
            1,
            id="replicated_mt_test_moves_after_merges_work",
        ),
    ],
)
def test_moves_after_merges_work(started_cluster, name, engine, positive):
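    """Merge two parts into one before the TTL expires, then wait for expiry and
    check that the merged part is moved to 'external' (positive case) or stays
    on jbod1 (negative case)."""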
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        wait_expire_1 = 16
        wait_expire_2 = 20
        time_1 = time.time() + wait_expire_1
        time_2 = time.time() + wait_expire_1 + wait_expire_2

        wait_expire_1_thread = threading.Thread(
            target=time.sleep, args=(wait_expire_1,)
        )
        wait_expire_1_thread.start()

        for _ in range(2):
            data = []  # 14MB in total
            for i in range(7):
                data.append(
                    (
                        "randomPrintableASCII(1024*1024)",
                        "toDateTime({})".format(
                            time_1 if i > 0 or positive else time_2
                        ),
                    )
                )  # 1MB row

            node1.query(
                "INSERT INTO {} (s1, d1) VALUES {}".format(
                    name, ",".join(["(" + ",".join(x) + ")" for x in data])
                )
            )

        node1.query("OPTIMIZE TABLE {}".format(name))

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert (
            "1"
            == node1.query(
                "SELECT count() FROM system.parts WHERE table = '{}' AND active = 1".format(
                    name
                )
            ).strip()
        )

        wait_expire_1_thread.join()
        time.sleep(wait_expire_2 / 2)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"external" if positive else "jbod1"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "14"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive,bar",
    [
        pytest.param(
            "mt_test_moves_after_alter_do_not_work",
            "MergeTree()",
            0,
            "DELETE",
            id="mt_negative",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_alter_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",
            0,
            "DELETE",
            id="replicated_negative",
        ),
        pytest.param(
            "mt_test_moves_after_alter_work",
            "MergeTree()",
            1,
            "DELETE",
            id="mt_positive",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_alter_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",
            1,
            "DELETE",
            id="replicated_positive",
        ),
        pytest.param(
            "mt_test_moves_after_alter_do_not_work",
            "MergeTree()",
            0,
            "TO DISK 'external'",
            id="mt_external_negative",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_alter_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_do_not_work', '1')",
            0,
            "TO DISK 'external'",
            id="replicated_external_negative",
        ),
        pytest.param(
            "mt_test_moves_after_alter_work",
            "MergeTree()",
            1,
            "TO DISK 'external'",
            id="mt_external_positive",
        ),
        pytest.param(
            "replicated_mt_test_moves_after_alter_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_moves_after_alter_work', '1')",
            1,
            "TO DISK 'external'",
            id="replicated_external_positive",
        ),
    ],
)
def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, bar):
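    """Check that ALTER ... MODIFY TTL replaces the previous TTL: after the ALTER
    the original `d1 TO DISK 'external'` rule no longer applies, so expired rows
    stay on jbod1 (positive case); without the ALTER they move to 'external'."""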
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        if positive:
            node1.query(
                """
                ALTER TABLE {name}
                    MODIFY TTL
                    d1 + INTERVAL 15 MINUTE {bar}
                """.format(
                    name=name, bar=bar
                )
            )  # That shall disable TTL.

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time.time() - 1),
                )
            )  # 1MB row
        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1" if positive else "external"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param("mt_test_materialize_ttl_in_partition", "MergeTree()", id="mt"),
        pytest.param(
            "replicated_mt_test_materialize_ttl_in_partition",
            "ReplicatedMergeTree('/clickhouse/test_materialize_ttl_in_partition', '1')",
            id="replicated",
        ),
    ],
)
def test_materialize_ttl_in_partition(started_cluster, name, engine):
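    """Add a move TTL with materialize_ttl_after_modify=0, then MATERIALIZE TTL
    only in partitions 2 and 4 and check that exactly those partitions move to
    the 'external' disk."""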
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                p1 Int8,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY p1
            PARTITION BY p1
            SETTINGS storage_policy='small_jbod_with_external'
            """.format(
                name=name, engine=engine
            )
        )

        data = []  # 5MB in total
        for i in range(5):
            data.append(
                (
                    str(i),
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time.time() - 1),
                )
            )  # 1MB row
        node1.query(
            "INSERT INTO {} (p1, s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        node1.query(
            """
            ALTER TABLE {name}
                MODIFY TTL
                d1 TO DISK 'external' SETTINGS materialize_ttl_after_modify = 0
            """.format(
                name=name
            )
        )

        time.sleep(3)

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}

        node1.query(
            """
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 2
            """.format(
                name=name
            )
        )

        node1.query(
            """
            ALTER TABLE {name}
                MATERIALIZE TTL IN PARTITION 4
            """.format(
                name=name
            )
        )

        time.sleep(3)

        used_disks_sets = []
        for i in range(len(data)):
            used_disks_sets.append(
                set(get_used_disks_for_table(node1, name, partition=i))
            )

        assert used_disks_sets == [
            {"jbod1"},
            {"jbod1"},
            {"external"},
            {"jbod1"},
            {"external"},
        ]

        assert node1.query(
            "SELECT count() FROM {name}".format(name=name)
        ).strip() == str(len(data))

    finally:
        node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))


@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param(
            "mt_test_alter_multiple_ttls_positive", "MergeTree()", True, id="positive"
        ),
        pytest.param(
            "mt_replicated_test_alter_multiple_ttls_positive",
            "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_positive', '1')",
            True,
            id="replicated_positive",
        ),
        pytest.param(
            "mt_test_alter_multiple_ttls_negative", "MergeTree()", False, id="negative"
        ),
        pytest.param(
            "mt_replicated_test_alter_multiple_ttls_negative",
            "ReplicatedMergeTree('/clickhouse/replicated_test_alter_multiple_ttls_negative', '1')",
            False,
            id="replicated_negative",
        ),
    ],
)
def test_alter_multiple_ttls(started_cluster, name, engine, positive):
    """Check that, when multiple TTL expressions are set and the TTL expressions
    are changed with an ALTER command before any parts are inserted, all old
    TTL expressions are removed and the parts are moved to the specified disk
    or volume, or deleted if the new TTL expression is triggered, and are not
    moved or deleted when it is not.
    """
    name = unique_table_name(name)

    now = time.time()
    try:
        node1.query(
            """
            CREATE TABLE {name} (
                p1 Int64,
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            PARTITION BY p1
            TTL d1 + INTERVAL 34 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 64 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
            """.format(
                name=name, engine=engine
            )
        )

        node1.query(
            """
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 14 SECOND TO VOLUME 'external',
                d1 + INTERVAL 19 SECOND DELETE
            """.format(
                name=name
            )
        )

        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                p1 = p
                d1 = now - 1 if i > 0 or positive else now + 300
                data.append(
                    "({}, randomPrintableASCII(1024*1024), toDateTime({}))".format(
                        p1, d1
                    )
                )
            node1.query(
                "INSERT INTO {name} (p1, s1, d1) VALUES {values}".format(
                    name=name, values=",".join(data)
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == ({"jbod2"} if positive else {"jbod1", "jbod2"})

        assert node1.query(
            "SELECT count() FROM {name}".format(name=name)
        ).splitlines() == ["6"]

        if positive:
            expected_disks = {"external"}
        else:
            expected_disks = {"jbod1", "jbod2"}

        check_used_disks_with_retry(node1, name, expected_disks, 50)

        assert node1.query(
            "SELECT count() FROM {name}".format(name=name)
        ).splitlines() == ["6"]

        time.sleep(5)

        for i in range(50):
            rows_count = int(
                node1.query("SELECT count() FROM {name}".format(name=name)).strip()
            )
            if positive:
                if rows_count == 0:
                    break
            else:
                if rows_count == 3:
                    break
            node1.query("OPTIMIZE TABLE {name} FINAL".format(name=name))
            time.sleep(0.5)

        if positive:
            assert rows_count == 0
        else:
            assert rows_count == 3

    finally:
        node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))


@pytest.mark.parametrize(
    "name,engine",
    [
        pytest.param("concurrently_altering_ttl_mt", "MergeTree()", id="mt"),
        pytest.param(
            "concurrently_altering_ttl_replicated_mt",
            "ReplicatedMergeTree('/clickhouse/concurrently_altering_ttl_replicated_mt', '1')",
            id="replicated_mt",
        ),
    ],
)
def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
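    """Stress test: from a thread pool, concurrently run inserts, ALTER ... MOVE
    PART/PARTITION between disks and volumes, ALTER UPDATE mutations, ALTER
    MODIFY TTL with random rules, and OPTIMIZE, tolerating expected query errors."""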
|
2020-11-29 16:44:02 +00:00
|
|
|
name = unique_table_name(name)
|
|
|
|
|
2020-01-15 15:44:43 +00:00
|
|
|
try:
|
|
|
|
node1.query(
|
|
|
|
"""
|
|
|
|
CREATE TABLE {name} (
|
|
|
|
EventDate Date,
|
|
|
|
number UInt64
|
|
|
|
) ENGINE = {engine}
|
|
|
|
ORDER BY tuple()
|
|
|
|
PARTITION BY toYYYYMM(EventDate)
|
|
|
|
SETTINGS storage_policy='jbods_with_external'
|
|
|
|
""".format(
|
|
|
|
name=name, engine=engine
|
|
|
|
)
|
2022-03-22 16:39:58 +00:00
|
|
|
)
|
2020-01-15 15:44:43 +00:00
|
|
|
|
2020-09-16 04:26:10 +00:00
|
|
|
values = list({random.randint(1, 1000000) for _ in range(0, 1000)})
|
2020-01-15 15:44:43 +00:00
|
|
|
|
|
|
|
        def insert(num):
            for i in range(num):
                day = random.randint(11, 30)
                value = values.pop()
                month = "0" + str(random.choice([3, 4]))
                node1.query(
                    "INSERT INTO {} VALUES(toDate('2019-{m}-{d}'), {v})".format(
                        name, m=month, d=day, v=value
                    )
                )

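        # Randomly move a single part or a whole partition to a random disk or
        # volume; failures are expected while other ALTERs run concurrently.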
        def alter_move(num):
            def produce_alter_move(node, name):
                move_type = random.choice(["PART", "PARTITION"])
                if move_type == "PART":
                    for _ in range(10):
                        try:
                            parts = (
                                node.query(
                                    "SELECT name FROM system.parts WHERE table = '{}' AND active = 1".format(
                                        name
                                    )
                                )
                                .strip()
                                .split("\n")
                            )
                            break
                        except QueryRuntimeException:
                            pass
                    else:
                        raise Exception("Cannot select from system.parts")

                    move_part = random.choice(["'" + part + "'" for part in parts])
                else:
                    move_part = random.choice([201903, 201904])

                # move_disk holds the destination *kind* (DISK or VOLUME),
                # move_volume the destination *name*, already quoted for SQL.
                move_disk = random.choice(["DISK", "VOLUME"])
                if move_disk == "DISK":
                    move_volume = random.choice(["'external'", "'jbod1'", "'jbod2'"])
                else:
                    move_volume = random.choice(["'main'", "'external'"])
                try:
                    node.query(
                        "ALTER TABLE {} MOVE {mt} {mp} TO {md} {mv}".format(
                            name,
                            mt=move_type,
                            mp=move_part,
                            md=move_disk,
                            mv=move_volume,
                        )
                    )
                except QueryRuntimeException:
                    pass

            for i in range(num):
                produce_alter_move(node1, name)

        def alter_update(num):
            for i in range(num):
                try:
                    node1.query(
                        "ALTER TABLE {} UPDATE number = number + 1 WHERE 1".format(name)
                    )
                except Exception:
                    pass

        def alter_modify_ttl(num):
            for i in range(num):
                ttls = []
                for j in range(random.randint(1, 10)):
                    what = random.choice(
                        [
                            "TO VOLUME 'main'",
                            "TO VOLUME 'external'",
                            "TO DISK 'jbod1'",
                            "TO DISK 'jbod2'",
                            "TO DISK 'external'",
                        ]
                    )
                    when = "now()+{}".format(random.randint(-1, 5))
                    ttls.append("{} {}".format(when, what))
                try:
                    node1.query(
                        "ALTER TABLE {} MODIFY TTL {}".format(name, ", ".join(ttls))
                    )
                except QueryRuntimeException:
                    pass

        def optimize_table(num):
            for i in range(num):
                try:  # optimize may throw after concurrent alter
                    node1.query(
                        "OPTIMIZE TABLE {} FINAL".format(name),
                        settings={"optimize_throw_if_noop": "1"},
                    )
                    break
                except Exception:
                    pass

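        # 5 rounds x 5 task kinds = 25 concurrent jobs on a 15-thread pool, so
        # inserts, moves, mutations, TTL changes and merges genuinely overlap.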
        p = Pool(15)
        tasks = []
        for i in range(5):
            tasks.append(p.apply_async(insert, (30,)))
            tasks.append(p.apply_async(alter_move, (30,)))
            tasks.append(p.apply_async(alter_update, (30,)))
            tasks.append(p.apply_async(alter_modify_ttl, (30,)))
            tasks.append(p.apply_async(optimize_table, (30,)))

        for task in tasks:
            task.get(timeout=120)

assert node1.query("SELECT 1") == "1\n"
|
2020-10-21 10:11:26 +00:00
|
|
|
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "150\n"
|
2020-01-15 15:44:43 +00:00
|
|
|
finally:
|
2023-05-03 18:06:46 +00:00
|
|
|
node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))
|
2020-01-18 22:03:25 +00:00
|
|
|
|
2020-09-16 04:26:10 +00:00
|
|
|
|
2020-06-08 11:25:30 +00:00
|
|
|
@pytest.mark.parametrize(
    "name,engine,positive",
    [
        pytest.param("mt_test_alter_with_merge_do_not_work", "MergeTree()", 0, id="mt"),
        pytest.param(
            "replicated_mt_test_alter_with_merge_do_not_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_do_not_work', '1')",
            0,
            id="replicated",
        ),
        pytest.param("mt_test_alter_with_merge_work", "MergeTree()", 1, id="mt_work"),
        pytest.param(
            "replicated_mt_test_alter_with_merge_work",
            "ReplicatedMergeTree('/clickhouse/replicated_test_alter_with_merge_work', '1')",
            1,
            id="replicated_work",
        ),
    ],
)
def test_alter_with_merge_work(started_cluster, name, engine, positive):
    """Check that TTL expressions are re-evaluated for
    existing parts after ALTER command changes TTL expressions
    and parts are merged.
    """
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 + INTERVAL 3000 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 6000 SECOND TO VOLUME 'external'
            SETTINGS storage_policy='jbods_with_external', merge_with_ttl_timeout=0
        """.format(
                name=name, engine=engine
            )
        )

        def optimize_table(num):
            for i in range(num):
                try:  # optimize may throw after concurrent alter
                    node1.query(
                        "OPTIMIZE TABLE {} FINAL".format(name),
                        settings={"optimize_throw_if_noop": "1"},
                    )
                    break
                except Exception:
                    pass

        for p in range(3):
            data = []  # 6MB in total
            now = time.time()
            for i in range(2):
                d1 = now - 1 if positive else now + 300
                data.append(
                    "(randomPrintableASCII(1024*1024), toDateTime({}))".format(d1)
                )
            values = ",".join(data)
            node1.query(
                "INSERT INTO {name} (s1, d1) VALUES {values}".format(
                    name=name, values=values
                )
            )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1", "jbod2"}

node1.query("SELECT count() FROM {name}".format(name=name)).splitlines() == [
|
|
|
|
"6"
|
|
|
|
]
|
|
|
|
|
|
|
|
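        # Changing the TTL does not touch existing parts by itself: the new
        # expressions only take effect once the parts are rewritten by merges.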
        node1.query(
            """
            ALTER TABLE {name} MODIFY
            TTL d1 + INTERVAL 0 SECOND TO DISK 'jbod2',
                d1 + INTERVAL 5 SECOND TO VOLUME 'external',
                d1 + INTERVAL 30 SECOND DELETE
        """.format(
                name=name
            )
        )

        optimize_table(20)

        assert (
            node1.query(
                "SELECT count() FROM system.parts WHERE table = '{name}' AND active = 1".format(
                    name=name
                )
            )
            == "1\n"
        )

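        # After ~5 seconds the INTERVAL 5 SECOND 'TO VOLUME external' rule has
        # expired for the pre-dated rows, so the next merges should move the part.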
        time.sleep(5)

        optimize_table(20)

        if positive:
            assert check_used_disks_with_retry(
                node1, name, {"external"}
            ), "Parts: " + node1.query(
                f"SELECT disk_name, name FROM system.parts WHERE table = '{name}' AND active = 1"
            )
        else:
            assert check_used_disks_with_retry(
                node1, name, {"jbod1", "jbod2"}
            ), "Parts: " + node1.query(
                f"SELECT disk_name, name FROM system.parts WHERE table = '{name}' AND active = 1"
            )

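        # 5 + 25 seconds puts us past the 30-second DELETE TTL, so the next
        # merges drop the expired rows entirely in the positive case.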
        time.sleep(25)

        optimize_table(20)

        if positive:
            assert node1.query("SELECT count() FROM {name}".format(name=name)) == "0\n"
        else:
            assert node1.query("SELECT count() FROM {name}".format(name=name)) == "6\n"

    finally:
        node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))


@pytest.mark.parametrize(
    "name,dest_type,engine",
    [
        pytest.param(
            "mt_test_disabled_ttl_move_on_insert_work", "DISK", "MergeTree()", id="disk"
        ),
        pytest.param(
            "mt_test_disabled_ttl_move_on_insert_work",
            "VOLUME",
            "MergeTree()",
            id="volume",
        ),
        pytest.param(
            "replicated_mt_test_disabled_ttl_move_on_insert_work",
            "DISK",
            "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')",
            id="replicated_disk",
        ),
        pytest.param(
            "replicated_mt_test_disabled_ttl_move_on_insert_work",
            "VOLUME",
            "ReplicatedMergeTree('/clickhouse/replicated_test_disabled_ttl_move_on_insert_work', '1')",
            id="replicated_volume",
        ),
    ],
)
def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine):
    name = unique_table_name(name)

    try:
        node1.query(
            """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 TO {dest_type} 'external'
            SETTINGS storage_policy='jbod_without_instant_ttl_move'
        """.format(
                name=name, dest_type=dest_type, engine=engine
            )
        )

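        # The policy name suggests perform_ttl_move_on_insert is disabled
        # there, so the freshly inserted (already expired) part should land on
        # jbod1; stopping moves keeps the background task from relocating it.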
        node1.query("SYSTEM STOP MOVES {}".format(name))

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time.time() - 1),
                )
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )

        used_disks = get_used_disks_for_table(node1, name)
        assert set(used_disks) == {"jbod1"}
        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

node1.query("SYSTEM START MOVES {}".format(name))
|
2020-09-18 15:30:00 +00:00
|
|
|
time.sleep(3)
|
|
|
|
|
|
|
|
used_disks = get_used_disks_for_table(node1, name)
|
2020-09-18 17:45:30 +00:00
|
|
|
assert set(used_disks) == {"external"}
|
2020-09-18 15:30:00 +00:00
|
|
|
assert (
|
|
|
|
node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
|
2022-03-22 16:39:58 +00:00
|
|
|
)
|
2020-09-18 15:30:00 +00:00
|
|
|
|
|
|
|
    finally:
        try:
            node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
        except Exception:
            pass


@pytest.mark.parametrize(
    "name,dest_type",
    [
        pytest.param("replicated_mt_move_if_exists", "DISK", id="replicated_disk"),
        pytest.param("replicated_mt_move_if_exists", "VOLUME", id="replicated_volume"),
    ],
)
def test_ttl_move_if_exists(started_cluster, name, dest_type):
    name = unique_table_name(name)

    try:
        query_template = """
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_mt_move_if_exists', '{node_name}')
            ORDER BY tuple()
            TTL d1 TO {dest_type} {if_exists} 'external'
            SETTINGS storage_policy='{policy}'
        """

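        # Without IF EXISTS, creating the table must fail on node1: its
        # 'only_jbod_1' policy has no 'external' destination for the TTL move.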
        with pytest.raises(QueryRuntimeException):
            node1.query(
                query_template.format(
                    name=name,
                    node_name=node1.name,
                    dest_type=dest_type,
                    if_exists="",
                    policy="only_jbod_1",
                )
            )

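        # With IF EXISTS the same DDL succeeds on both replicas: the TTL move
        # clause is simply ignored where the destination is missing.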
        for node, policy in zip(
            [node1, node2], ["only_jbod_1", "small_jbod_with_external"]
        ):
            node.query(
                query_template.format(
                    name=name,
                    node_name=node.name,
                    dest_type=dest_type,
                    if_exists="IF EXISTS",
                    policy=policy,
                )
            )

        data = []  # 10MB in total
        for i in range(10):
            data.append(
                (
                    "randomPrintableASCII(1024*1024)",
                    "toDateTime({})".format(time.time() - 1),
                )
            )

        node1.query(
            "INSERT INTO {} (s1, d1) VALUES {}".format(
                name, ",".join(["(" + ",".join(x) + ")" for x in data])
            )
        )
        node2.query("SYSTEM SYNC REPLICA {}".format(name))

        time.sleep(5)

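        # node1 has no 'external' storage, so its data stays on jbod1, while
        # node2 honours the TTL and moves the replicated part to 'external'.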
        used_disks1 = get_used_disks_for_table(node1, name)
        assert set(used_disks1) == {"jbod1"}

        used_disks2 = get_used_disks_for_table(node2, name)
        assert set(used_disks2) == {"external"}

        assert (
            node1.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )
        assert (
            node2.query("SELECT count() FROM {name}".format(name=name)).strip() == "10"
        )

    finally:
        try:
            node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
            node2.query("DROP TABLE IF EXISTS {} SYNC".format(name))
        except Exception:
            pass


class TestCancelBackgroundMoving:
    @pytest.fixture()
    def prepare_table(self, request, started_cluster):
        name = unique_table_name(request.node.name)
        engine = f"ReplicatedMergeTree('/clickhouse/{name}', '1')"

        node1.query(
            f"""
            CREATE TABLE {name} (
                s1 String,
                d1 DateTime
            ) ENGINE = {engine}
            ORDER BY tuple()
            TTL d1 + INTERVAL 5 SECOND TO DISK 'external'
            SETTINGS storage_policy='small_jbod_with_external'
            """
        )

        node1.query("SYSTEM STOP MOVES")

        # Insert part which is about to move
        node1.query(
            "INSERT INTO {} (s1, d1) VALUES (randomPrintableASCII({}), toDateTime({}))".format(
                name, 10 * 1024 * 1024, time.time()
            )
        )

        # Set low bandwidth to have enough time to cancel part moving
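        # (At 256 KiB/s, the 10 MiB part needs roughly 40 seconds to transfer.)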
        config = inspect.cleandoc(
            f"""
            <clickhouse>
                <max_local_write_bandwidth_for_server>{256 * 1024}</max_local_write_bandwidth_for_server>
            </clickhouse>
            """
        )
        node1.replace_config(
            "/etc/clickhouse-server/config.d/disk_throttling.xml", config
        )
        node1.restart_clickhouse()

        try:
            yield name
        finally:
            node1.query(f"DROP TABLE IF EXISTS {name} SYNC")

    def test_cancel_background_moving_on_stop_moves_query(self, prepare_table):
        name = prepare_table

        # Wait for background moving task to be started
        node1.query("SYSTEM START MOVES")
        assert_eq_with_retry(
            node1,
            f"SELECT count() FROM system.moves WHERE table = '{name}'",
            "1",
        )

        # Wait for background moving task to be cancelled
        node1.query("SYSTEM STOP MOVES")
        assert_logs_contain_with_retry(
            node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts"
        )
        assert_eq_with_retry(
            node1,
            f"SELECT count() FROM system.moves WHERE table = '{name}'",
            "0",
        )

        # Ensure that part was not moved
        assert set(get_used_disks_for_table(node1, name)) == {"jbod1"}

    def test_cancel_background_moving_on_table_detach(self, prepare_table):
        name = prepare_table

        # Wait for background moving task to be started
        node1.query("SYSTEM START MOVES")
        assert_eq_with_retry(
            node1,
            f"SELECT count() FROM system.moves WHERE table = '{name}'",
            "1",
        )

        # Wait for background moving task to be cancelled
        node1.query(f"DETACH TABLE {name}")
        assert_logs_contain_with_retry(
            node1, "MergeTreeBackgroundExecutor.*Cancelled moving parts"
        )
        assert_eq_with_retry(
            node1,
            f"SELECT count() FROM system.moves WHERE table = '{name}'",
            "0",
        )

    def test_cancel_background_moving_on_zookeeper_disconnect(self, prepare_table):
        name = prepare_table

        # Wait for background moving task to be started
        node1.query("SYSTEM START MOVES")
        assert_eq_with_retry(
            node1,
            f"SELECT count() FROM system.moves WHERE table = '{name}'",
            "1",
        )

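        # Losing the ZooKeeper session should make the replicated table cancel
        # the in-flight background move rather than finish it while detached.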
        with PartitionManager() as pm:
            pm.drop_instance_zk_connections(node1)
            # Wait for background moving task to be cancelled
            assert_logs_contain_with_retry(
                node1,
                "MergeTreeBackgroundExecutor.*Cancelled moving parts",
                retry_count=30,
                sleep_time=1,
            )