import uuid

from contextlib import contextmanager
from multiprocessing.dummy import Pool
from multiprocessing import TimeoutError as PoolTaskTimeoutError

from testflows.core.name import basename, parentname
from testflows._core.testtype import TestSubType
from testflows.core import *

from rbac.helper.tables import table_types

@TestStep(Given)
def instrument_clickhouse_server_log(self, node=None, clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log"):
    """Instrument clickhouse-server.log for the current test by adding
    start and end messages that include the current test name to the
    clickhouse-server.log of the specified node. If the test fails,
    dump the portion of clickhouse-server.log written during this test.
    """
    if node is None:
        node = self.context.node

    with By("getting current log size"):
        cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
        logsize = cmd.output.split(" ")[0].strip()

    try:
        with And("adding test name start message to the clickhouse-server.log"):
            node.command(f"echo -e \"\\n-- start: {current().name} --\\n\" >> {clickhouse_server_log}")

        with And("dumping memory and disk usage info"):
            node.command(f"echo -e \"\\n-- {current().name} -- top --\\n\" && top -bn1")
            node.command(f"echo -e \"\\n-- {current().name} -- df --\\n\" && df -h")
            node.command(f"echo -e \"\\n-- {current().name} -- free --\\n\" && free -mh")

        yield

    finally:
        # skip teardown if the whole cluster is already being torn down
        if self.context.cluster.terminating is True:
            return

        with Finally("adding test name end message to the clickhouse-server.log", flags=TE):
            node.command(f"echo -e \"\\n-- end: {current().name} --\\n\" >> {clickhouse_server_log}")

        with And("checking if test has failing result"):
            if not self.parent.result:
                with Then("dumping clickhouse-server.log for this test"):
                    node.command(f"tail -c +{logsize} {clickhouse_server_log}")

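# A minimal usage sketch (hypothetical scenario, not part of this module),
# assuming `self.context.node` has already been set by the test suite:
# calling the step at the top of a test brackets the server log for the
# test's duration and registers the dump-on-failure check as teardown.
#
# @TestScenario
# def my_scenario(self):
#     instrument_clickhouse_server_log()
#     with When("I run the checks"):
#         ...
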
def join(tasks, polling_timeout=5):
    """Join all parallel tests by polling each task in turn.
    The first exception raised by any task is re-raised after
    all tasks have completed.
    """
    exc = None

    while tasks:
        try:
            try:
                tasks[0].get(timeout=polling_timeout)
                tasks.pop(0)

            except PoolTaskTimeoutError:
                # task is still running; rotate it to the back of the
                # queue so that the other tasks also get polled
                task = tasks.pop(0)
                tasks.append(task)
                continue

        except KeyboardInterrupt:
            # signal all tests that the cluster is terminating and
            # keep joining so that the tasks can finish cleanly
            current().context.cluster.terminating = True
            continue

        except Exception as e:
            tasks.pop(0)
            if exc is None:
                exc = e
            current().context.cluster.terminating = True

    if exc is not None:
        raise exc

def start(pool, tasks, scenario, kwargs=None):
    """Start a parallel test by submitting the scenario to the pool
    and registering the resulting task in the tasks list.
    """
    if kwargs is None:
        kwargs = {}

    task = pool.apply_async(scenario, [], kwargs)
    tasks.append(task)

    return task

def run_scenario(pool, tasks, scenario, kwargs=None):
    """Run a scenario either in parallel using the pool or
    sequentially, depending on the current parallel setting.
    """
    if kwargs is None:
        kwargs = {}

    if current().context.parallel:
        start(pool, tasks, scenario, kwargs)
    else:
        scenario(**kwargs)

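# A minimal sketch of how the helpers above fit together (hypothetical
# feature, not part of this module): scenarios are dispatched through
# run_scenario() and collected with join() once everything is submitted.
#
# @TestFeature
# def feature(self):
#     with Pool(4) as pool:
#         tasks = []
#         try:
#             for scenario in loads(current_module(), Scenario):
#                 run_scenario(pool, tasks, scenario)
#         finally:
#             join(tasks)
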
def permutations(table_count=1):
    """Return bitmasks for combinations of the given number of tables,
    from 0 (no tables selected) up to, but not including, the mask
    with all tables selected.
    """
    return [*range((1 << table_count)-1)]

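# For example, permutations(table_count=2) returns [0, 1, 2]: bitmask 0
# selects no tables, 1 selects table0 only, and 2 selects table1 only;
# the all-selected mask 3 (0b11) is not included in the range.
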
def getuid():
    """Return a unique identifier based on the current test name
    with a uuid1 suffix, suitable for use in database object names.
    """
    if current().subtype == TestSubType.Example:
        testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}"
    else:
        testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}"

    return testname + "_" + str(uuid.uuid1()).replace('-', '_')

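# For example, inside a test named "show create" this might return
# something like "show_create_2e1a4b7c_..." where the suffix is a
# uuid1 value with dashes replaced by underscores.
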
@contextmanager
def table(node, name, table_type_name="MergeTree"):
    """Create one or more tables (comma-separated names) with the
    specified table engine and drop them on exit.
    """
    table_type = table_types[table_type_name]
    names = name.split(",")
    try:
        for name in names:
            with Given(f"I have table {name} with engine {table_type_name}"):
                node.query(f"DROP TABLE IF EXISTS {name}")
                node.query(table_type.create_statement.format(name=name))
        yield
    finally:
        for name in names:
            with Finally(f"I drop the table {name}"):
                if table_type.cluster:
                    node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {table_type.cluster}")
                else:
                    node.query(f"DROP TABLE IF EXISTS {name}")

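# A minimal usage sketch (hypothetical, assuming a `node` object from the
# test cluster): both tables exist inside the `with` block and are dropped
# on exit, even if the block body raises.
#
# with table(node, "table0,table1", table_type_name="MergeTree"):
#     node.query("SELECT * FROM table0")
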
@contextmanager
def user(node, name):
    """Create one or more users (comma-separated names) and
    drop them on exit.
    """
    names = name.split(",")
    try:
        for name in names:
            with Given(f"I have user {name}"):
                node.query(f"CREATE USER OR REPLACE {name}")
        yield
    finally:
        for name in names:
            with Finally(f"I drop the user {name}"):
                node.query(f"DROP USER IF EXISTS {name}")

@contextmanager
def role(node, role):
    """Create one or more roles (comma-separated names) and
    drop them on exit.
    """
    roles = role.split(",")
    try:
        for role in roles:
            with Given(f"I have role {role}"):
                node.query(f"CREATE ROLE OR REPLACE {role}")
        yield
    finally:
        for role in roles:
            with Finally(f"I drop the role {role}"):
                node.query(f"DROP ROLE IF EXISTS {role}")

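# A minimal usage sketch (hypothetical, assuming a `node` object from the
# test cluster): the context managers can be combined so that the user
# and the role are both dropped on exit.
#
# with user(node, "user0"), role(node, "role0"):
#     node.query("GRANT role0 TO user0")
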
@TestStep(Given)
def row_policy(self, name, table, node=None):
    """Create a row policy with the given name on the given table
    and drop it on exit.
    """
    if node is None:
        node = self.context.node

    try:
        with Given(f"I create row policy {name}"):
            node.query(f"CREATE ROW POLICY {name} ON {table}")
        yield

    finally:
        with Finally(f"I delete row policy {name}"):
            node.query(f"DROP ROW POLICY IF EXISTS {name} ON {table}")

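# A minimal usage sketch (hypothetical scenario): because row_policy() is
# a Given test step, it is called directly inside a test and the policy
# is dropped automatically during teardown.
#
# @TestScenario
# def my_scenario(self):
#     row_policy(name="policy0", table="table0")
#     ...
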
# bit assigned to each table name inside a grants bitmask
# (see permutations() and grant_select_on_table())
tables = {
    "table0": 1 << 0,
    "table1": 1 << 1,
    "table2": 1 << 2,
    "table3": 1 << 3,
    "table4": 1 << 4,
    "table5": 1 << 5,
    "table6": 1 << 6,
    "table7": 1 << 7,
}

@contextmanager
def grant_select_on_table(node, grants, target_name, *table_names):
    """Grant SELECT privilege on the tables selected by the grants
    bitmask to the target user or role, yield a comma-separated list
    of the granted table names, and revoke the privileges on exit.
    """
    try:
        tables_granted = []
        for table_number in range(len(table_names)):
            # grant only if this table's bit is set in the grants bitmask
            if grants & tables[f"table{table_number}"]:
                with When(f"I grant select privilege on {table_names[table_number]}"):
                    node.query(f"GRANT SELECT ON {table_names[table_number]} TO {target_name}")
                tables_granted.append(f"{table_names[table_number]}")
        yield ", ".join(tables_granted)
    finally:
        for table_number in range(len(table_names)):
            with Finally(f"I revoke the select privilege on {table_names[table_number]}"):
                node.query(f"REVOKE SELECT ON {table_names[table_number]} FROM {target_name}")

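# A minimal sketch of how the bitmask pieces fit together (hypothetical,
# assuming `node` and a user named "user0" already exist): every grant
# combination over two tables is exercised in turn.
#
# for grants in permutations(table_count=2):
#     with grant_select_on_table(node, grants, "user0", "table0", "table1") as granted:
#         note(f"granted select on: {granted}")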