new tests from master

Yatsishin Ilya 2021-05-27 07:24:16 +03:00
parent 28af06f3d0
commit ae60a3dd3a
3 changed files with 25 additions and 23 deletions

tests/integration/helpers/cluster.py

@@ -254,6 +254,7 @@ class ClickHouseCluster:
         # available when with_hdfs == True
         self.hdfs_host = "hdfs1"
+        self.hdfs_ip = None
         self.hdfs_name_port = get_free_port()
         self.hdfs_data_port = get_free_port()
         self.hdfs_dir = p.abspath(p.join(self.instances_dir, "hdfs"))
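
(Aside: these lines call a get_free_port() helper. A minimal sketch of how such a helper is commonly written, using the bind-to-port-0 trick; this is an assumption, not necessarily the actual implementation in helpers/cluster.py:)

    import socket

    def get_free_port():
        # Bind to port 0 so the OS assigns any free ephemeral port,
        # then read the chosen port back before releasing the socket.
        # Note the inherent race: the port could be taken again before use.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]
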
@@ -1108,6 +1109,7 @@ class ClickHouseCluster:
     def wait_hdfs_to_start(self, hdfs_api, timeout=300):
+        self.hdfs_ip = self.get_instance_ip(self.hdfs_host)
         start = time.time()
         while time.time() - start < timeout:
             try:
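
(The hunk ends mid-function. For context, a hedged sketch of how such a wait loop typically continues; the probe call and messages are hypothetical, not the file's actual code:)

    def wait_hdfs_to_start(self, hdfs_api, timeout=300):
        # Resolve the HDFS container's IP up front so later code (and the
        # tests below) can reach the namenode by IP rather than assuming
        # a localhost port mapping.
        self.hdfs_ip = self.get_instance_ip(self.hdfs_host)
        start = time.time()
        while time.time() - start < timeout:
            try:
                hdfs_api.write_data("/probe", "1")  # hypothetical readiness probe
                return
            except Exception as ex:
                print("Can't connect to HDFS: " + str(ex))
                time.sleep(1)
        raise Exception("Can't wait HDFS to start")
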

tests/integration/test_dictionaries_complex_key_cache_string/test.py

@@ -3,20 +3,20 @@ import os
 import pytest
 from helpers.cluster import ClickHouseCluster
 
-@pytest.fixture(scope="function")
-def cluster(request):
-    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-    cluster = ClickHouseCluster(__file__)
-    try:
-        if request.param == "memory":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+cluster = ClickHouseCluster(__file__)
+node_memory = cluster.add_instance('node_memory', main_configs=['configs/enable_dictionaries.xml',
                                                               'configs/dictionaries/complex_key_cache_string.xml'])
-        if request.param == "ssd":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
+node_ssd = cluster.add_instance('node_ssd', main_configs=['configs/enable_dictionaries.xml',
                                                           'configs/dictionaries/ssd_complex_key_cache_string.xml'])
+
+@pytest.fixture()
+def started_cluster():
+    try:
         cluster.start()
-        node.query(
+        node_memory.query(
             "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")
+        node_ssd.query(
+            "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")
         yield cluster
@@ -24,9 +24,9 @@ def cluster(request):
         cluster.shutdown()
 
-@pytest.mark.parametrize("cluster", ["memory", "ssd"], indirect=True)
-def test_memory_consumption(cluster):
-    node = cluster.instances['node']
+@pytest.mark.parametrize("type", ["memory", "ssd"])
+def test_memory_consumption(started_cluster, type):
+    node = started_cluster.instances[f'node_{type}']
     node.query(
         "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('w' * 8))
     node.query(
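
(This rewrite drops pytest's indirect fixture parametrization, which built a fresh cluster for every parameter value, in favor of one shared cluster holding both nodes, with the test parametrized over the node name. A minimal standalone sketch of the two styles, with hypothetical names and no ClickHouse dependency:)

    import pytest

    # Old style: indirect=True routes each parameter value into the
    # fixture, so expensive setup runs once per value.
    @pytest.fixture()
    def cluster(request):
        yield {"node": request.param}

    @pytest.mark.parametrize("cluster", ["memory", "ssd"], indirect=True)
    def test_old_style(cluster):
        assert cluster["node"] in ("memory", "ssd")

    # New style: one shared object holds both variants and the test
    # merely selects a node by name, so setup is shared across parameters.
    NODES = {"node_memory": "memory", "node_ssd": "ssd"}

    @pytest.mark.parametrize("type", ["memory", "ssd"])
    def test_new_style(type):
        assert NODES[f"node_{type}"] == type
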

tests/integration/test_merge_tree_hdfs/test.py

@@ -64,7 +64,7 @@ def cluster():
         cluster.start()
         logging.info("Cluster started")
 
-        fs = HdfsClient(hosts='localhost')
+        fs = HdfsClient(hosts=cluster.hdfs_ip)
         fs.mkdirs('/clickhouse')
         logging.info("Created HDFS directory")
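
(Every hunk below makes the same substitution: instead of assuming the WebHDFS endpoint is published on localhost, which only holds with a fixed host port mapping, the tests connect to the container IP resolved in wait_hdfs_to_start(). A sketch of the pattern, assuming these tests use the pyhdfs client; pyhdfs falls back to the default WebHDFS port when none is given:)

    from pyhdfs import HdfsClient

    def make_fs(cluster):
        # cluster.hdfs_ip is filled in by wait_hdfs_to_start(); no port is
        # passed, so the client uses its default WebHDFS port.
        return HdfsClient(hosts=cluster.hdfs_ip)
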
@@ -75,7 +75,7 @@ def cluster():
 
 def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
     while num_tries > 0:
         num_hdfs_objects = len(fs.listdir('/clickhouse'))
         if num_hdfs_objects == expected:
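
(The hunk shows only the head of wait_for_delete_hdfs_objects(). A hypothetical completion of the polling loop, consistent with the visible lines:)

    import time
    from pyhdfs import HdfsClient  # assumed client, as above

    def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
        fs = HdfsClient(hosts=cluster.hdfs_ip)
        while num_tries > 0:
            num_hdfs_objects = len(fs.listdir('/clickhouse'))
            if num_hdfs_objects == expected:
                break
            num_tries -= 1
            time.sleep(1)  # assumed pause between retries
        assert len(fs.listdir('/clickhouse')) == expected
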
@@ -89,7 +89,7 @@ def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
 def drop_table(cluster):
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     hdfs_objects = fs.listdir('/clickhouse')
     print('Number of hdfs objects to delete:', len(hdfs_objects), sep=' ')
@@ -116,7 +116,7 @@ def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
     node.query("INSERT INTO hdfs_test VALUES {}".format(values1))
     assert node.query("SELECT * FROM hdfs_test order by dt, id FORMAT Values") == values1
 
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     hdfs_objects = fs.listdir('/clickhouse')
     print(hdfs_objects)
@@ -136,7 +136,7 @@ def test_alter_table_columns(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
@@ -165,7 +165,7 @@ def test_attach_detach_partition(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -204,7 +204,7 @@ def test_move_partition_to_another_disk(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -230,7 +230,7 @@ def test_table_manipulations(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -262,7 +262,7 @@ def test_move_replace_partition_to_another_table(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))