hardening

Yatsishin Ilya 2021-06-16 15:31:19 +03:00
parent cbbeb4001b
commit f4d587a17a
6 changed files with 27 additions and 23 deletions

View File

@@ -1421,9 +1421,9 @@ class ClickHouseCluster:
instance.docker_client = self.docker_client
instance.ip_address = self.get_instance_ip(instance.name)
logging.debug("Waiting for ClickHouse start...")
logging.debug("Waiting for ClickHouse start in {instance}, ip: {instance.ip_address}...")
instance.wait_for_start(start_timeout)
logging.debug("ClickHouse started")
logging.debug("ClickHouse {instance} started")
instance.client = Client(instance.ip_address, command=self.client_bin_path)
@@ -1911,6 +1911,7 @@ class ClickHouseInstance:
self.get_docker_handle().start()
def wait_for_start(self, start_timeout=None, connection_timeout=None):
handle = self.get_docker_handle()
if start_timeout is None or start_timeout <= 0:
raise Exception("Invalid timeout: {}".format(start_timeout))
@@ -1933,11 +1934,10 @@ class ClickHouseInstance:
return False
while True:
handle = self.get_docker_handle()
handle.reload()
status = handle.status
if status == 'exited':
raise Exception("Instance `{}' failed to start. Container status: {}, logs: {}"
.format(self.name, status, handle.logs().decode('utf-8')))
raise Exception(f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}")
deadline = start_time + start_timeout
# It is possible that server starts slowly.
@@ -1947,9 +1947,8 @@ class ClickHouseInstance:
current_time = time.time()
if current_time >= deadline:
raise Exception("Timed out while waiting for instance `{}' with ip address {} to start. "
"Container status: {}, logs: {}".format(self.name, self.ip_address, status,
handle.logs().decode('utf-8')))
raise Exception(f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. " \
f"Container status: {status}, logs: {handle.logs().decode('utf-8')}")
socket_timeout = min(start_timeout, deadline - current_time)
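
The ClickHouseInstance.wait_for_start hunks above rework the wait loop around an explicit deadline: the docker handle is re-fetched and reloaded on every iteration so its status is fresh, an 'exited' container fails fast with its logs, and the per-attempt socket timeout is capped by the time remaining. A minimal sketch of that polling pattern, assuming a docker-py container handle and a plain TCP probe (a hypothetical helper, not the test framework's actual method):

    import socket
    import time

    def wait_for_tcp_start(handle, ip_address, port=9000, start_timeout=300):
        # Poll the container until `port` accepts connections or the deadline passes.
        start_time = time.time()
        deadline = start_time + start_timeout
        while True:
            handle.reload()  # refresh cached state; `handle.status` is stale otherwise
            if handle.status == 'exited':
                raise Exception(f"Container exited before start, logs: {handle.logs().decode('utf-8')}")

            current_time = time.time()
            if current_time >= deadline:
                raise Exception(f"Timed out waiting for {ip_address}:{port}, "
                                f"container status: {handle.status}")

            # Never block on the socket longer than the time left until the deadline.
            socket_timeout = min(start_timeout, deadline - current_time)
            try:
                with socket.create_connection((ip_address, port), timeout=socket_timeout):
                    return
            except OSError:
                time.sleep(0.5)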

View File

@@ -48,11 +48,11 @@ class Task:
node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;")
node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;")
instance = cluster.instances['first']
first = cluster.instances['first']
# daily partition database
instance.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
instance.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
first.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
first.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
(
id UUID DEFAULT generateUUIDv4(),
vendor_id String,
@@ -84,12 +84,12 @@ class Task:
ORDER BY (tpep_pickup_datetime, id)
PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""")
instance.query("""CREATE TABLE dailyhistory.yellow_tripdata
first.query("""CREATE TABLE dailyhistory.yellow_tripdata
ON CLUSTER events
AS dailyhistory.yellow_tripdata_staging
ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")
instance.query("""INSERT INTO dailyhistory.yellow_tripdata
first.query("""INSERT INTO dailyhistory.yellow_tripdata
SELECT * FROM generateRandom(
'id UUID DEFAULT generateUUIDv4(),
vendor_id String,
@@ -119,8 +119,8 @@ class Task:
1, 10, 2) LIMIT 50;""")
# monthly partition database
instance.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
first.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
first.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
(
id UUID DEFAULT generateUUIDv4(),
vendor_id String,
@@ -153,16 +153,16 @@ class Task:
ORDER BY (tpep_pickup_datetime, id)
PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""")
instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata
first.query("""CREATE TABLE monthlyhistory.yellow_tripdata
ON CLUSTER events
AS monthlyhistory.yellow_tripdata_staging
ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")
def check(self):
instance = cluster.instances["first"]
a = TSV(instance.query("SELECT count() from dailyhistory.yellow_tripdata"))
b = TSV(instance.query("SELECT count() from monthlyhistory.yellow_tripdata"))
first = cluster.instances["first"]
a = TSV(first.query("SELECT count() from dailyhistory.yellow_tripdata"))
b = TSV(first.query("SELECT count() from monthlyhistory.yellow_tripdata"))
assert a == b, "Distributed tables"
for instance_name, instance in cluster.instances.items():

View File

@@ -150,7 +150,7 @@ def test_reload_after_loading(started_cluster):
time.sleep(1) # see the comment above
replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '82', '83')
replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '102', '103')
time.sleep(7)
time.sleep(10)
assert query("SELECT dictGetInt32('file', 'a', toUInt64(9))") == "103\n"
assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "83\n"

View File

@@ -53,6 +53,7 @@ def test_default_database(test_cluster):
def test_create_view(test_cluster):
instance = test_cluster.instances['ch3']
test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.super_simple_view ON CLUSTER 'cluster'")
test_cluster.ddl_check_query(instance,
"CREATE VIEW test.super_simple_view ON CLUSTER 'cluster' AS SELECT * FROM system.numbers FORMAT TSV")
test_cluster.ddl_check_query(instance,
@@ -76,7 +77,7 @@ def test_on_server_fail(test_cluster):
kill_instance.get_docker_handle().stop()
request = instance.get_query_request("CREATE TABLE test.test_server_fail ON CLUSTER 'cluster' (i Int8) ENGINE=Null",
timeout=30)
timeout=180)
kill_instance.get_docker_handle().start()
test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'")
@@ -106,11 +107,11 @@ def _test_on_connection_losses(test_cluster, zk_timeout):
def test_on_connection_loss(test_cluster):
_test_on_connection_losses(test_cluster, 5) # connection loss will occur only (3 sec ZK timeout in config)
_test_on_connection_losses(test_cluster, 10) # connection loss will occur only (10 sec ZK timeout in config)
def test_on_session_expired(test_cluster):
_test_on_connection_losses(test_cluster, 15) # session should be expired (3 sec ZK timeout in config)
_test_on_connection_losses(test_cluster, 30) # session should be expired (10 sec ZK timeout in config)
def test_simple_alters(test_cluster):
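
The DDL-on-cluster hunks raise the query-request timeout from 30 to 180 seconds and scale the connection-loss waits with the ZooKeeper session timeout mentioned in the comments (now 10 seconds instead of 3). A tiny sketch with hypothetical names, just to make the 10/30 choice explicit:

    ZK_SESSION_TIMEOUT = 10  # seconds; mirrors the "10 sec ZK timeout in config" comments above

    def zk_outage_seconds(expire_session):
        # Per the comments in the hunk: an outage no longer than the session timeout
        # should produce only a connection loss, while three times the timeout
        # should expire the session.
        return ZK_SESSION_TIMEOUT * 3 if expire_session else ZK_SESSION_TIMEOUT

    assert zk_outage_seconds(False) == 10  # _test_on_connection_losses(test_cluster, 10)
    assert zk_outage_seconds(True) == 30   # _test_on_connection_losses(test_cluster, 30)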

View File

@@ -30,6 +30,7 @@ def started_cluster():
def test_chroot_with_same_root(started_cluster):
for i, node in enumerate([node1, node2]):
node.query('DROP TABLE IF EXISTS simple SYNC')
node.query('''
CREATE TABLE simple (date Date, id UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
@@ -44,6 +45,7 @@ def test_chroot_with_same_root(started_cluster):
def test_chroot_with_different_root(started_cluster):
for i, node in [(1, node1), (3, node3)]:
node.query('DROP TABLE IF EXISTS simple_different SYNC')
node.query('''
CREATE TABLE simple_different (date Date, id UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple_different', '{replica}', date, id, 8192);

View File

@@ -22,6 +22,8 @@ def started_cluster():
cluster.shutdown()
def test_identity(started_cluster):
node1.query('DROP TABLE IF EXISTS simple SYNC')
node1.query('''
CREATE TABLE simple (date Date, id UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
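
The last two files gain the same step: test_chroot_with_same_root, test_chroot_with_different_root and test_identity now drop their replicated tables with SYNC before recreating them, so reruns start from a clean state. A minimal sketch of that drop-then-create pattern, wrapped in a hypothetical recreate_simple_table helper (the tests inline it):

    def recreate_simple_table(node, table, zk_path):
        # DROP ... SYNC waits for the table to be removed completely instead of
        # only being scheduled for deletion, so the CREATE below does not collide
        # with leftovers from a previous run.
        node.query("DROP TABLE IF EXISTS {} SYNC".format(table))
        node.query("""
            CREATE TABLE {} (date Date, id UInt32)
            ENGINE = ReplicatedMergeTree('{}', '{{replica}}', date, id, 8192)
        """.format(table, zk_path))

    # Usage mirroring test_chroot_with_same_root:
    # recreate_simple_table(node1, 'simple', '/clickhouse/tables/0/simple')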