2019-08-12 09:37:48 +00:00
|
|
|
import os
|
2020-09-16 04:26:10 +00:00
|
|
|
|
|
|
|
import pytest
|
2019-08-12 09:37:48 +00:00
|
|
|
from helpers.cluster import ClickHouseCluster
|
|
|
|
|
2021-05-27 04:24:16 +00:00
|
|
|
# Directory containing this test file; available for resolving relative paths.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

cluster = ClickHouseCluster(__file__)

# Two instances, one per cache-dictionary layout under test:
# an in-memory complex-key cache and an SSD-backed complex-key cache.
node_memory = cluster.add_instance('node_memory', dictionaries=['configs/dictionaries/complex_key_cache_string.xml'])
node_ssd = cluster.add_instance('node_ssd', dictionaries=['configs/dictionaries/ssd_complex_key_cache_string.xml'])
|
2020-09-16 04:26:10 +00:00
|
|
|
|
2021-05-27 04:24:16 +00:00
|
|
|
@pytest.fixture()
def started_cluster():
    """Start the cluster, create ``radars_table`` on both instances, yield it.

    The cluster is always shut down afterwards, even if startup or table
    creation fails.
    """
    ddl = (
        "create table radars_table (radar_id String, radar_ip String, client_id String) "
        "engine=MergeTree() order by radar_id"
    )
    try:
        cluster.start()
        # Both nodes get an identical source table for their dictionaries.
        for instance in (node_memory, node_ssd):
            instance.query(ddl)
        yield cluster
    finally:
        cluster.shutdown()
|
|
|
|
|
2021-06-03 13:10:54 +00:00
|
|
|
def _query_allocated_bytes(node):
    """Return bytes_allocated reported by system.dictionaries for the 'radars' dictionary."""
    return int(node.query("select bytes_allocated from system.dictionaries where name = 'radars'").strip())


def _assert_cache_size_stable(node, baseline):
    """Hit the dictionary five times and assert allocation never exceeds *baseline*."""
    observed = []
    for _ in range(5):
        node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")
        observed.append(_query_allocated_bytes(node))
    # size doesn't grow
    assert all(baseline >= a for a in observed)


@pytest.mark.skip(reason="SSD cache test can run on disk only")
@pytest.mark.parametrize("type", ["memory", "ssd"])
def test_memory_consumption(started_cluster, type):
    """Check the cache dictionary's allocation stays bounded under repeated lookups.

    Runs against either the in-memory or SSD-backed cache instance,
    selected by the ``type`` parameter.
    """
    node = started_cluster.instances[f'node_{type}']

    # Insert rows with payload strings of increasing length (8/16/32/64 chars)
    # so the cache has to store variable-size values.
    for ch, width in (('w', 8), ('x', 16), ('y', 32), ('z', 64)):
        node.query(
            "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format(ch * width))

    # Fill dictionary
    node.query("select dictGetString('radars', 'client_id', tuple(toString(number))) from numbers(0, 5000)")

    # Allocation right after the initial fill is the upper bound we expect.
    allocated_first = _query_allocated_bytes(node)

    # Two rounds of repeated lookups (originally two copy-pasted loops):
    # allocation must never exceed the initial size in either round.
    _assert_cache_size_stable(node, allocated_first)
    _assert_cache_size_stable(node, allocated_first)
|