Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)

Commit ae60a3dd3a (parent 28af06f3d0): new tests from master
@@ -254,6 +254,7 @@ class ClickHouseCluster:
         # available when with_hdfs == True
         self.hdfs_host = "hdfs1"
+        self.hdfs_ip = None
         self.hdfs_name_port = get_free_port()
         self.hdfs_data_port = get_free_port()
         self.hdfs_dir = p.abspath(p.join(self.instances_dir, "hdfs"))

@@ -1108,6 +1109,7 @@ class ClickHouseCluster:

     def wait_hdfs_to_start(self, hdfs_api, timeout=300):
+        self.hdfs_ip = self.get_instance_ip(self.hdfs_host)
         start = time.time()
         while time.time() - start < timeout:
             try:
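Taken together, the two hunks above expose the HDFS container's address on the cluster object: hdfs_ip starts out as None and is filled in by wait_hdfs_to_start() via get_instance_ip(). A minimal sketch of how a test helper could consume the new attribute once HDFS is up (the pyhdfs import and the helper name are assumptions for illustration, not part of the commit):

    # Sketch only: reads back the value wait_hdfs_to_start() stored on the cluster object.
    from pyhdfs import HdfsClient  # assumed client library, matching the HdfsClient usage below

    def count_hdfs_objects(cluster, path='/clickhouse'):
        # hdfs_ip stays None until wait_hdfs_to_start() has run
        assert cluster.hdfs_ip is not None, "HDFS has not been started yet"
        fs = HdfsClient(hosts=cluster.hdfs_ip)
        return len(fs.listdir(path))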
@@ -3,20 +3,20 @@ import os
 import pytest
 from helpers.cluster import ClickHouseCluster

+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+cluster = ClickHouseCluster(__file__)
+node_memory = cluster.add_instance('node_memory', main_configs=['configs/enable_dictionaries.xml',
+                                                                'configs/dictionaries/complex_key_cache_string.xml'])
+node_ssd = cluster.add_instance('node_ssd', main_configs=['configs/enable_dictionaries.xml',
+                                                          'configs/dictionaries/ssd_complex_key_cache_string.xml'])
+
-@pytest.fixture(scope="function")
-def cluster(request):
-    SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-    cluster = ClickHouseCluster(__file__)
+@pytest.fixture()
+def started_cluster():
     try:
-        if request.param == "memory":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
-                                                              'configs/dictionaries/complex_key_cache_string.xml'])
-        if request.param == "ssd":
-            node = cluster.add_instance('node', main_configs=['configs/enable_dictionaries.xml',
-                                                              'configs/dictionaries/ssd_complex_key_cache_string.xml'])
         cluster.start()
-        node.query(
+        node_memory.query(
+            "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")
+        node_ssd.query(
             "create table radars_table (radar_id String, radar_ip String, client_id String) engine=MergeTree() order by radar_id")

         yield cluster

@@ -24,9 +24,9 @@ def cluster(request):
         cluster.shutdown()


-@pytest.mark.parametrize("cluster", ["memory", "ssd"], indirect=True)
-def test_memory_consumption(cluster):
-    node = cluster.instances['node']
+@pytest.mark.parametrize("type", ["memory", "ssd"])
+def test_memory_consumption(started_cluster, type):
+    node = started_cluster.instances[f'node_{type}']
     node.query(
         "insert into radars_table select toString(rand() % 5000), '{0}', '{0}' from numbers(1000)".format('w' * 8))
     node.query(
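The reworked test no longer rebuilds the cluster per parameter: both instances are registered at module import time, and the type parameter only selects which node to query. A short sketch of how another test could reuse the same selection pattern (the test body is a hypothetical illustration, not part of the commit):

    @pytest.mark.parametrize("type", ["memory", "ssd"])
    def test_radars_table_exists(started_cluster, type):
        # Pick the node carrying the matching dictionary layout (cache vs. ssd_cache).
        node = started_cluster.instances[f'node_{type}']
        assert node.query("exists table radars_table").strip() == "1"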
@@ -64,7 +64,7 @@ def cluster():
         cluster.start()
         logging.info("Cluster started")

-        fs = HdfsClient(hosts='localhost')
+        fs = HdfsClient(hosts=cluster.hdfs_ip)
         fs.mkdirs('/clickhouse')

         logging.info("Created HDFS directory")

@@ -75,7 +75,7 @@ def cluster():


 def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
     while num_tries > 0:
         num_hdfs_objects = len(fs.listdir('/clickhouse'))
         if num_hdfs_objects == expected:

@@ -89,7 +89,7 @@ def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
 def drop_table(cluster):
     node = cluster.instances["node"]

-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)
     hdfs_objects = fs.listdir('/clickhouse')
     print('Number of hdfs objects to delete:', len(hdfs_objects), sep=' ')

@@ -116,7 +116,7 @@ def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
     node.query("INSERT INTO hdfs_test VALUES {}".format(values1))
     assert node.query("SELECT * FROM hdfs_test order by dt, id FORMAT Values") == values1

-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     hdfs_objects = fs.listdir('/clickhouse')
     print(hdfs_objects)

@@ -136,7 +136,7 @@ def test_alter_table_columns(cluster):
     create_table(cluster, "hdfs_test")

     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))

@@ -165,7 +165,7 @@ def test_attach_detach_partition(cluster):
     create_table(cluster, "hdfs_test")

     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))

@@ -204,7 +204,7 @@ def test_move_partition_to_another_disk(cluster):
     create_table(cluster, "hdfs_test")

     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))

@@ -230,7 +230,7 @@ def test_table_manipulations(cluster):
     create_table(cluster, "hdfs_test")

     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))

@@ -262,7 +262,7 @@ def test_move_replace_partition_to_another_table(cluster):
     create_table(cluster, "hdfs_test")

     node = cluster.instances["node"]
-    fs = HdfsClient(hosts='localhost')
+    fs = HdfsClient(hosts=cluster.hdfs_ip)

     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-03', 4096)))
     node.query("INSERT INTO hdfs_test VALUES {}".format(generate_values('2020-01-04', 4096)))
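All of the hunks above swap the hardcoded 'localhost' for the IP recorded on the cluster object, so the tests keep working when the HDFS container is not reachable on localhost. For context, the polling helper touched in the @@ -75,7 hunk reads roughly as follows once completed; only the lines shown in the diff come from the commit, the poll interval and the final assertion are assumptions:

    import time
    from pyhdfs import HdfsClient  # assumed client, consistent with the calls above

    def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
        fs = HdfsClient(hosts=cluster.hdfs_ip)
        while num_tries > 0:
            num_hdfs_objects = len(fs.listdir('/clickhouse'))
            if num_hdfs_objects == expected:
                break
            num_tries -= 1
            time.sleep(1)  # assumed poll interval
        assert len(fs.listdir('/clickhouse')) == expected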