Mirror of https://github.com/ClickHouse/ClickHouse.git
improve tests from test_backward_compatibility
This commit is contained in: parent 812a6ffb80, commit 58c1b57259

@@ -2028,6 +2028,37 @@ class ClickHouseInstance:
                return None
        return None

    def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
        if not self.stay_alive:
            raise Exception("Cannot restart not stay alive container")
        self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root')
        retries = int(stop_start_wait_sec / 0.5)
        local_counter = 0
        # wait stop
        while local_counter < retries:
            if not self.get_process_pid("clickhouse server"):
                break
            time.sleep(0.5)
            local_counter += 1

        # force kill if server hangs
        if self.get_process_pid("clickhouse server"):
            # server can die before kill, so don't throw exception, it's expected
            self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(9)], nothrow=True, user='root')

        if callback_onstop:
            callback_onstop(self)
        self.exec_in_container(
            ["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
            user='root')
        self.exec_in_container(["bash", "-c",
                                "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"],
                               user='root')
        self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid()))

        # wait start
        assert_eq_with_retry(self, "select 1", "1", retry_count=retries)

    def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
        if not self.stay_alive:
            raise Exception("Cannot restart not stay alive container")

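The new restart_with_original_version helper follows the same stop/start discipline as restart_with_latest_version: send the requested signal (SIGTERM by default), poll for the server pid for up to stop_start_wait_sec seconds, fall back to SIGKILL if the server is still up, swap the binary, start the daemon, and wait until "select 1" answers. The polling step, pulled out as a standalone sketch (the helper name and the get_pid callable are illustrative, not part of the diff):

import time

def wait_for_stop(get_pid, timeout_sec=300, poll_sec=0.5):
    """Poll get_pid() until it returns a falsy value or the timeout expires.

    Returns True if the process disappeared in time, False if a SIGKILL
    fallback is still needed (mirroring the loop in the hunk above).
    """
    retries = int(timeout_sec / poll_sec)
    for _ in range(retries):
        if not get_pid():
            return True
        time.sleep(poll_sec)
    return False

# e.g. wait_for_stop(lambda: node.get_process_pid("clickhouse server"))
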
@@ -2048,6 +2079,9 @@ class ClickHouseInstance:

        if callback_onstop:
            callback_onstop(self)
        self.exec_in_container(
            ["bash", "-c", "cp /usr/bin/clickhouse /usr/share/clickhouse_original"],
            user='root')
        self.exec_in_container(
            ["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
            user='root')

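This hunk is what makes the downgrade path possible: before restart_with_latest_version overwrites /usr/bin/clickhouse with the fresh build, it now saves the binary that shipped with the image to /usr/share/clickhouse_original, and restart_with_original_version copies that file back. A minimal sketch of the swap, assuming an instance object with the exec_in_container method used above (the helper names are invented for illustration):

def backup_image_binary(instance):
    # keep a copy of the binary that came with the container image
    instance.exec_in_container(
        ["bash", "-c", "cp /usr/bin/clickhouse /usr/share/clickhouse_original"],
        user='root')

def install_binary(instance, source):
    # overwrite the active binary with the requested build and make it executable
    instance.exec_in_container(
        ["bash", "-c", "cp {} /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse".format(source)],
        user='root')

# upgrade:   backup_image_binary(node); install_binary(node, "/usr/share/clickhouse_fresh")
# downgrade: install_binary(node, "/usr/share/clickhouse_original")
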
@@ -53,3 +53,9 @@ def test_backward_compatability(start_cluster):
    node1.restart_with_latest_version()

    assert (node1.query("SELECT avgMerge(x) FROM state") == '2.5\n')

    node1.query("drop table tab")
    node1.query("drop table state")
    node2.query("drop table tab")
    node3.query("drop table tab")
    node4.query("drop table tab")

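The added drop table calls make the test clean up what it created, so reruns and other tests sharing the same cluster start from a clean default database. An alternative, not used by this commit, is to centralize the cleanup in a pytest fixture; a sketch with illustrative names:

import pytest

@pytest.fixture
def scratch_tables(start_cluster):
    created = []          # (node, table_name) pairs registered by the test
    yield created
    for node, table in created:
        # best-effort teardown so later tests see a clean database
        node.query("DROP TABLE IF EXISTS {} SYNC".format(table))
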
@@ -5,7 +5,7 @@
import pytest
from helpers.cluster import ClickHouseCluster

-cluster = ClickHouseCluster(__file__)
+cluster = ClickHouseCluster(__file__, name="skipping_indices")
node = cluster.add_instance('node', image='yandex/clickhouse-server', tag='21.6', stay_alive=True, with_installed_binary=True)

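Passing an explicit name gives this module its own cluster (and docker-compose project) name rather than the directory-derived default that the other test_backward_compatibility modules would otherwise share. For orientation, the start_cluster fixture these tests reference is conventionally a module-scoped start/stop wrapper along these lines (a sketch of the usual pattern, not part of this diff):

import pytest

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()      # bring up the docker-compose project for this module
        yield cluster
    finally:
        cluster.shutdown()
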
@@ -41,4 +41,4 @@ def test_index(start_cluster):
    node.query("""
    SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index' SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1;
    DROP TABLE data;
    """)

@@ -30,3 +30,7 @@ def test_detach_part_wrong_partition_id(start_cluster):

    num_detached = node_21_6.query("select count() from system.detached_parts")
    assert num_detached == '1\n'

    node_21_6.restart_with_original_version()

    node_21_6.query("drop table tab SYNC")

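With restart_with_original_version available, a compatibility test can make a full round trip: create data on the old server, upgrade to the fresh build, verify, downgrade back to the image's binary, and drop what it created. A self-contained sketch under those assumptions (the cluster, instance, and table names here are illustrative, not taken from the commit):

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__, name="roundtrip_sketch")
node = cluster.add_instance('node', image='yandex/clickhouse-server', tag='21.6',
                            stay_alive=True, with_installed_binary=True)

@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def test_round_trip(start_cluster):
    node.query("create table t (x UInt64) engine = MergeTree order by x")
    node.query("insert into t values (1), (2)")

    node.restart_with_latest_version()         # image binary -> fresh build
    assert node.query("select count() from t") == '2\n'

    node.restart_with_original_version()       # back to the binary the image shipped with
    assert node.query("select count() from t") == '2\n'

    node.query("drop table t sync")
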
@@ -27,3 +27,6 @@ def test_select_aggregate_alias_column(start_cluster):

    node1.query("select sum(x_alias) from remote('node{1,2}', default, tab)")
    node2.query("select sum(x_alias) from remote('node{1,2}', default, tab)")

    node1.query("drop table tab")
    node2.query("drop table tab")

@@ -29,3 +29,5 @@ def test_backward_compatability(start_cluster):
        "select s, count() from remote('node{1,2}', default, tab) group by s order by toUInt64(s) limit 50")
    print(res)
    assert res == ''.join('{}\t2\n'.format(i) for i in range(50))
    node1.query("drop table tab")
    node2.query("drop table tab")
