ClickHouse/tests/integration/test_table_db_num_limit/test.py

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

117 lines
3.4 KiB
Python
Raw Normal View History

2024-06-10 20:46:13 +00:00
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

# Two replicas so the replicated-table limit can be exercised on both nodes;
# the per-node limits themselves live in config/config*.xml (not visible here).
node = cluster.add_instance(
    "node1",
    main_configs=["config/config.xml", "config/config1.xml"],
    macros={"replica": "r1"},
    with_zookeeper=True,
)
node2 = cluster.add_instance(
    "node2",
    main_configs=["config/config.xml", "config/config2.xml"],
    macros={"replica": "r2"},
    with_zookeeper=True,
)
2024-06-10 20:46:13 +00:00
2024-06-10 21:02:34 +00:00
2024-06-10 20:46:13 +00:00
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once, tear it down at the end."""
    try:
        cluster.start()
        yield cluster
    finally:
        # Guarantee teardown even if startup or a test fails mid-session.
        cluster.shutdown()
2024-06-10 21:02:34 +00:00
2024-06-10 20:46:13 +00:00
def test_table_db_limit(started_cluster):
    """Exercise the database / table / replicated-table count limits.

    The limits are configured in config/config*.xml (not visible in this
    file); from the assertions below, node1 presumably allows 10 databases
    (counting ``default``) and 10 user tables, and node2 has a lower
    replicated-table limit — confirm against the config files.
    """
    # The "default" database already exists, so only 9 more fit.
    for i in range(9):
        node.query("create database db{}".format(i))
    # The 10th user-created database must be rejected.
    assert "TOO_MANY_DATABASES" in node.query_and_get_error(
        "create database db_exp"
    )

    # Fill the table quota, then force system.*_log tables to materialize:
    # this checks that system tables are not accounted in the number of tables.
    for i in range(10):
        node.query("create table t{} (a Int32) Engine = Log".format(i))
    node.query("system flush logs")

    # Regular tables: recreating up to the limit still succeeds...
    for i in range(10):
        node.query("drop table t{}".format(i))
    for i in range(10):
        node.query("create table t{} (a Int32) Engine = Log".format(i))
    # ...and one more table over the limit fails.
    assert "TOO_MANY_TABLES" in node.query_and_get_error(
        "create table default.tx (a Int32) Engine = Log"
    )

    # Dictionaries count against the same table limit.
    for i in range(10):
        node.query(
            "create dictionary d{} (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)".format(
                i
            )
        )
    assert "TOO_MANY_TABLES" in node.query_and_get_error(
        "create dictionary dx (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)"
    )

    # Replicated tables: free up the table quota first.
    for i in range(10):
        node.query("drop table t{}".format(i))

    # t0..t2 are created on the whole cluster, i.e. on both replicas.
    for i in range(3):
        node.query(
            "create table t{} on cluster 'cluster' (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format(
                i, i
            )
        )

    # Test limit on other replica: node2's (lower) limit is already reached.
    assert "Too many replicated tables" in node2.query_and_get_error(
        "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a"
    )

    # t3..t4 exist only on node1, bringing it up to its replicated limit.
    for i in range(3, 5):
        node.query(
            "create table t{} (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format(
                i, i
            )
        )
    assert "Too many replicated tables" in node.query_and_get_error(
        "create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a"
    )

    # Checks that replicated tables are also counted as regular tables.
    for i in range(5, 10):
        node.query("create table t{} (a Int32) Engine = Log".format(i))
    assert "TOO_MANY_TABLES" in node.query_and_get_error(
        "create table tx (a Int32) Engine = Log"
    )

    # Cleanup: drop everything so the cluster is reusable by other tests.
    for i in range(10):
        node.query("drop table t{} sync".format(i))
    for i in range(3):
        node2.query("drop table t{} sync".format(i))
    # The failed 'tx' creations may have left replica metadata in ZooKeeper;
    # drop it explicitly for both replicas.
    node.query("system drop replica 'r1' from ZKPATH '/clickhouse/tables/tx'")
    node.query("system drop replica 'r2' from ZKPATH '/clickhouse/tables/tx'")
    for i in range(9):
        node.query("drop database db{}".format(i))
    for i in range(10):
        node.query("drop dictionary d{}".format(i))