ClickHouse/tests/integration/test_memory_limit_observer/test.py

import logging
import time

import pytest

from helpers.cluster import ClickHouseCluster, run_and_check

cluster = ClickHouseCluster(__file__)
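# node1 starts in a container capped at 5 GiB of memory (mem_limit="5g"); text_log.xml
# enables system.text_log so the tests can read the server's log messages via SQL.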
node1 = cluster.add_instance(
    "node1", main_configs=["config/text_log.xml"], mem_limit="5g"
)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def get_latest_mem_limit():
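    # Fetch the most recent "Setting max_server_memory_usage was set to ..." message from
    # system.text_log and return the reported value; retry for up to 10 seconds because the
    # entry may not have been flushed to the table yet. The extra "not like '%like%'" filter
    # keeps the query's own text (which also contains the pattern) out of the results.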
    for _ in range(10):
        try:
            mem_limit = float(
                node1.query(
                    """
                    select extract(message, '\\d+\\.\\d+') from system.text_log
                    where message like '%Setting max_server_memory_usage was set to%' and
                        message not like '%like%' order by event_time desc limit 1
                    """
                ).strip()
            )
            return mem_limit
        except Exception:
            time.sleep(1)
    raise Exception("Cannot get memory limit")


def test_observe_memory_limit(started_cluster):
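    # Bump the container's memory limit from 5g to 10g at runtime and wait (up to 5 minutes)
    # for the server to observe the new cgroup limit and raise max_server_memory_usage.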
    original_max_mem = get_latest_mem_limit()
    logging.debug(f"original memory limit: {original_max_mem}")
    run_and_check(["docker", "update", "--memory=10g", node1.docker_id])
    for _ in range(30):
        time.sleep(10)
        new_max_mem = get_latest_mem_limit()
        logging.debug(f"new memory limit: {new_max_mem}")
        if new_max_mem > original_max_mem:
            return
    raise Exception("the memory limit did not increase as expected")


def test_memory_usage_doesnt_include_page_cache_size(started_cluster):
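    # Fill the OS page cache with 4 GiB of file data, then check that the memory usage
    # reported by CgroupsMemoryUsageObserver does not grow to include the cached pages.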
    try:
        # Populate the page cache with 4 GiB of data; dd might be killed by the OOM killer,
        # which is fine.
        node1.exec_in_container(
            ["dd", "if=/dev/zero", "of=outputfile", "bs=1M", "count=4K"]
        )
    except Exception:
        pass
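    # cgroups_memory_usage_observer_wait_time is the observer's refresh period (hence the
    # variable name below); sleeping one period plus a second ensures at least one reading
    # taken after the dd run has been written to the log.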
    observer_refresh_period = int(
        node1.query(
            "select value from system.server_settings where name = 'cgroups_memory_usage_observer_wait_time'"
        ).strip()
    )
    time.sleep(observer_refresh_period + 1)
    max_mem_usage_from_cgroup = node1.query(
        """
        SELECT max(toUInt64(replaceRegexpAll(message, 'Read current memory usage (\\d+) bytes.*', '\\1'))) AS max_mem
        FROM system.text_log
        WHERE logger_name = 'CgroupsMemoryUsageObserver' AND message LIKE 'Read current memory usage%bytes%'
        """
    ).strip()
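    # dd wrote 4 GiB of zeros; if page cache were counted as server memory usage, the peak
    # reading would be well above 2 GiB, so assert it stayed below 2 GiB (2 * 2**30 bytes).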
    assert int(max_mem_usage_from_cgroup) < 2 * 2**30