From ae60a3dd3a787c2c1d590a70edcf8cc65d3681b4 Mon Sep 17 00:00:00 2001
From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com>
Date: Thu, 27 May 2021 07:24:16 +0300
Subject: [PATCH] new tests from master

---
 tests/integration/helpers/cluster.py          |  2 ++
 .../test.py                                   | 28 +++++++++----------
 .../integration/test_merge_tree_hdfs/test.py  | 18 ++++++------
 3 files changed, 25 insertions(+), 23 deletions(-)

diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index e1b37576a18..bdb5cc78dff 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -254,6 +254,7 @@ class ClickHouseCluster:
 
         # available when with_hdfs == True
         self.hdfs_host = "hdfs1"
+        self.hdfs_ip = None
         self.hdfs_name_port = get_free_port()
         self.hdfs_data_port = get_free_port()
         self.hdfs_dir = p.abspath(p.join(self.instances_dir, "hdfs"))
@@ -1108,6 +1109,7 @@ class ClickHouseCluster:
 
 
     def wait_hdfs_to_start(self, hdfs_api, timeout=300):
+        self.hdfs_ip = self.get_instance_ip(self.hdfs_host)
         start = time.time()
         while time.time() - start < timeout:
             try:
diff --git a/tests/integration/test_dictionaries_complex_key_cache_string/test.py b/tests/integration/test_dictionaries_complex_key_cache_string/test.py
index a01e60af47d..b05c4a5f3b4 100644
--- a/tests/integration/test_dictionaries_complex_key_cache_string/test.py
+++ b/tests/integration/test_dictionaries_complex_key_cache_string/test.py
@@ -3,20 +3,20 @@ import os
 import pytest
 from helpers.cluster import ClickHouseCluster
 
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+cluster = ClickHouseCluster(__file__)
+node_memory = cluster.add_instance('node_memory', main_configs=['configs/enable_dictionaries.xml',
+                                                                'configs/dictionaries/complex_key_cache_string.xml'])
+node_ssd = cluster.add_instance('node_ssd', main_configs=['configs/enable_dictionaries.xml',
+                                                          'configs/dictionaries/ssd_complex_key_cache_string.xml'])
 
-@pytest.fixture(scope="function")
-def cluster(request):
-    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-    cluster = ClickHouseCluster(__file__)
+@pytest.fixture()
+def started_cluster():
     try:
-        if request.param == "memory":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
-                                                              'configs/dictionaries/complex_key_cache_string.xml'])
-        if request.param == "ssd":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
-                                                              'configs/dictionaries/ssd_complex_key_cache_string.xml'])
         cluster.start()
-        node.query(
+        node_memory.query(
+            "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")
+        node_ssd.query(
             "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")
 
         yield cluster
@@ -24,9 +24,9 @@ def cluster(request):
         cluster.shutdown()
 
 
-@pytest.mark.parametrize("cluster", ["memory", "ssd"], indirect=True)
-def test_memory_consumption(cluster):
-    node = cluster.instances['node']
+@pytest.mark.parametrize("type", ["memory", "ssd"])
+def test_memory_consumption(started_cluster, type):
+    node = started_cluster.instances[f'node_{type}']
     node.query(
         "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('w' * 8))
     node.query(
diff --git a/tests/integration/test_merge_tree_hdfs/test.py b/tests/integration/test_merge_tree_hdfs/test.py
index 2d0d9d9fb1e..0984e4d288a 100644
--- a/tests/integration/test_merge_tree_hdfs/test.py
+++ b/tests/integration/test_merge_tree_hdfs/test.py
@@ -64,7 +64,7 @@ def cluster():
         cluster.start()
         logging.info("Cluster started")
 
-        fs = HdfsClient(hosts='localhost')
+        fs = HdfsClient(hosts=cluster.hdfs_ip)
         fs.mkdirs('/clickhouse')
         logging.info("Created HDFS directory")
 
@@ -75,7 +75,7 @@
 
 
 def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
     while num_tries > 0:
         num_hdfs_objects = len(fs.listdir('/clickhouse'))
         if num_hdfs_objects == expected:
@@ -89,7 +89,7 @@ def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
 
 def drop_table(cluster):
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     hdfs_objects = fs.listdir('/clickhouse')
     print('Number of hdfs objects to delete:', len(hdfs_objects), sep=' ')
@@ -116,7 +116,7 @@ def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
     node.query("INSERT INTO hdfs_test VALUES {}".format(values1))
     assert node.query("SELECT * FROM hdfs_test order by dt, id FORMAT Values") == values1
 
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
     hdfs_objects = fs.listdir('/clickhouse')
     print(hdfs_objects)
 
@@ -136,7 +136,7 @@ def test_alter_table_columns(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
@@ -165,7 +165,7 @@ def test_attach_detach_partition(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -204,7 +204,7 @@ def test_move_partition_to_another_disk(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -230,7 +230,7 @@ def test_table_manipulations(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
@@ -262,7 +262,7 @@ def test_move_replace_partition_to_another_table(cluster):
     create_table(cluster, "hdfs_test")
 
     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
 
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))