mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-29 02:52:13 +00:00)
Merge pull request #26329 from vzakaznikov/fix_testflows_rbac_sample_by_tests
Fixing RBAC sample by tests in TestFlows.
commit 59e6502a3b
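The substance of the fix: ClickHouse accepts a SAMPLE BY expression only if it is part of the table's primary key, so every test table below now declares PRIMARY KEY b (with b leading the sorting key) and the sample-by checks sample by b instead of a freshly added column. A minimal sketch of that constraint, using a node.query helper like the one these tests rely on; the table name is illustrative and not part of this change:

# Sketch only: why the tables below declare PRIMARY KEY b.
node.query("CREATE TABLE default.sample_by_sketch (d DATE, a String, b UInt8, x String, y Int8) "
           "ENGINE = MergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b")
# Accepted: b is an unsigned integer column contained in the primary key.
node.query("ALTER TABLE default.sample_by_sketch MODIFY SAMPLE BY b")
# The same ALTER against a table ordered only by d is expected to be rejected,
# because the sampling expression would not be part of the primary key.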
@@ -3,39 +3,39 @@ from collections import namedtuple
table_tuple = namedtuple("table_tuple", "create_statement cluster")
table_types = {
"MergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY d", None),
"ReplacingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = ReplacingMergeTree() PARTITION BY y ORDER BY d", None),
"SummingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) ENGINE = SummingMergeTree() PARTITION BY y ORDER BY d", None),
"AggregatingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = AggregatingMergeTree() PARTITION BY y ORDER BY d", None),
"CollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) ENGINE = CollapsingMergeTree(sign) PARTITION BY y ORDER BY d", None),
"VersionedCollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) ENGINE = VersionedCollapsingMergeTree(sign, version) PARTITION BY y ORDER BY d", None),
"GraphiteMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) ENGINE = GraphiteMergeTree('graphite_rollup_example') PARTITION BY y ORDER by d", None),
"MergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = MergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"ReplacingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = ReplacingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"SummingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) ENGINE = SummingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"AggregatingMergeTree": table_tuple("CREATE TABLE {name} (d DATE, a String, b UInt8, x String, y Int8) ENGINE = AggregatingMergeTree() PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"CollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) ENGINE = CollapsingMergeTree(sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"VersionedCollapsingMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) ENGINE = VersionedCollapsingMergeTree(sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"GraphiteMergeTree": table_tuple("CREATE TABLE {name} (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) ENGINE = GraphiteMergeTree('graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", None),
"ReplicatedMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedReplacingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedReplacingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedSummingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \
ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedSummingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8 DEFAULT 1, x String, y Int8) \
ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedAggregatingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedAggregatingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d DATE, a String, b UInt8, x String, y Int8) \
ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedCollapsingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \
ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedCollapsingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, sign Int8 DEFAULT 1) \
ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedVersionedCollapsingMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \
ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedVersionedCollapsingMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, version UInt64, sign Int8 DEFAULT 1) \
ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY d", "one_shard_cluster"),
ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', sign, version) PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
"ReplicatedGraphiteMergeTree-sharded_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER sharded_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \
ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY d", "sharded_cluster"),
ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "sharded_cluster"),
"ReplicatedGraphiteMergeTree-one_shard_cluster": table_tuple("CREATE TABLE {name} ON CLUSTER one_shard_cluster (d Date, a String, b UInt8, x String, y Int8, Path String, Time DateTime, Value Float64, col UInt64, Timestamp Int64) \
ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY d", "one_shard_cluster"),
}
ENGINE = ReplicatedGraphiteMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}', 'graphite_rollup_example') PARTITION BY y ORDER BY (b, d) PRIMARY KEY b", "one_shard_cluster"),
}
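Each table_tuple pairs a CREATE TABLE template with the cluster it targets (None for a single node). A hypothetical usage sketch; the .format call, table name, and node object are illustrative rather than taken from this file:

# Hypothetical use of the table_types mapping defined above.
entry = table_types["ReplicatedMergeTree-sharded_cluster"]
# {name} is substituted here, while the doubled {{shard}} and {{replica}} survive
# str.format() as {shard} and {replica}, which the server expands from its macros.
create_sql = entry.create_statement.format(name="default.rbac_test_table")
node.query(create_sql)  # entry.cluster ("sharded_cluster") tells the caller which
                        # cluster the ON CLUSTER DDL is expected to run against.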
@@ -128,10 +128,10 @@ def check_order_by_when_privilege_is_granted(table, user, node):
column = "order"
with Given("I run sanity check"):
node.query(f"ALTER TABLE {table} MODIFY ORDER BY d", settings = [("user", user)])
node.query(f"ALTER TABLE {table} MODIFY ORDER BY b", settings = [("user", user)])
with And("I add new column and modify order using that column"):
node.query(f"ALTER TABLE {table} ADD COLUMN {column} UInt32, MODIFY ORDER BY (d, {column})")
node.query(f"ALTER TABLE {table} ADD COLUMN {column} UInt32, MODIFY ORDER BY (b, {column})")
with When(f"I insert random data into the ordered-by column {column}"):
data = random.sample(range(1,1000),100)
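Adding the column and changing the sorting key in a single ALTER above is deliberate: MergeTree generally only lets the sorting key be extended with columns introduced in the same ALTER statement, so the two clauses have to travel together. A sketch of the pattern with placeholder names:

# Sketch of the combined statement used above; table and column names are placeholders.
node.query("ALTER TABLE default.rbac_test_table "
           "ADD COLUMN order_col UInt32, MODIFY ORDER BY (b, order_col)")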
@@ -151,7 +151,7 @@ def check_order_by_when_privilege_is_granted(table, user, node):
with And("I verify that the sorting key is present in the table"):
output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output)
assert f"ORDER BY (d, {column})" in output['statement'], error()
assert f"ORDER BY (b, {column})" in output['statement'], error()
with But(f"I cannot drop the required column {column}"):
exitcode, message = errors.missing_columns(column)
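SHOW CREATE TABLE with FORMAT JSONEachRow returns a single row whose statement column holds the recreated DDL, which is why the checks above can json.loads the raw output and look for the expected ORDER BY clause. A small sketch with a placeholder table name:

import json

# Sketch: JSONEachRow emits one JSON object per row, e.g.
# {"statement": "CREATE TABLE default.rbac_test_table (...) ORDER BY (b, order_col) ..."}
output = json.loads(node.query("SHOW CREATE TABLE default.rbac_test_table FORMAT JSONEachRow").output)
assert "ORDER BY (b, order_col)" in output["statement"]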
@@ -163,21 +163,13 @@ def check_sample_by_when_privilege_is_granted(table, user, node):
"""
column = 'sample'
with Given(f"I add new column {column}"):
node.query(f"ALTER TABLE {table} ADD COLUMN {column} UInt32")
with When(f"I add sample by clause"):
node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY (d, {column})",
node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY b",
settings = [("user", user)])
with Then("I verify that the sample is in the table"):
output = json.loads(node.query(f"SHOW CREATE TABLE {table} FORMAT JSONEachRow").output)
assert f"SAMPLE BY (d, {column})" in output['statement'], error()
with But(f"I cannot drop the required column {column}"):
exitcode, message = errors.missing_columns(column)
node.query(f"ALTER TABLE {table} DROP COLUMN {column}",
exitcode=exitcode, message=message)
assert f"SAMPLE BY b" in output['statement'], error()
def check_add_index_when_privilege_is_granted(table, user, node):
"""Ensures ADD INDEX runs as expected when the privilege is granted to the specified user
@@ -258,7 +250,7 @@ def check_order_by_when_privilege_is_not_granted(table, user, node):
"""
with When("I try to use privilege that has not been granted"):
exitcode, message = errors.not_enough_privileges(user)
node.query(f"ALTER TABLE {table} MODIFY ORDER BY d",
node.query(f"ALTER TABLE {table} MODIFY ORDER BY b",
settings = [("user", user)], exitcode=exitcode, message=message)
def check_sample_by_when_privilege_is_not_granted(table, user, node):
@@ -266,7 +258,7 @@ def check_sample_by_when_privilege_is_not_granted(table, user, node):
"""
with When("I try to use privilege that has not been granted"):
exitcode, message = errors.not_enough_privileges(user)
node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY d",
node.query(f"ALTER TABLE {table} MODIFY SAMPLE BY b",
settings = [("user", user)], exitcode=exitcode, message=message)
def check_add_index_when_privilege_is_not_granted(table, user, node):
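Both not-granted checks lean on the same negative-path idiom: the errors helpers return the expected (exitcode, message) pair and node.query asserts the ALTER fails with exactly that pair when executed as the unprivileged user. Spelled out with placeholder names:

# Sketch of the negative-path idiom used above; user and table names are placeholders.
exitcode, message = errors.not_enough_privileges("user0")
node.query("ALTER TABLE default.rbac_test_table MODIFY SAMPLE BY b",
           settings=[("user", "user0")],  # run as the user that lacks the privilege
           exitcode=exitcode, message=message)  # expect this exit code and error text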
@@ -24,7 +24,7 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
try:
run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args)
#run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
#run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)
run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args)