Merge pull request #71179 from kirillgarbar/table-limit

Hard limits on number of replicated tables, dictionaries and views

Commit acd4f46d9c
@@ -597,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.

<max_table_num_to_throw>400</max_table_num_to_throw>
```

## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}

If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**

```xml
<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
```

## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}

If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only dictionaries in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**

```xml
<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
```

## max\_view\_num\_to\_throw {#max-view-num-to-throw}

If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only views in the Atomic/Ordinary/Replicated/Lazy database engines are counted.

**Example**

```xml
<max_view_num_to_throw>400</max_view_num_to_throw>
```

## max\_database\_num\_to\_throw {#max-database-num-to-throw}

If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.

Default value: 0
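For consistency with the settings above, a configuration example for this limit would look like the following (the value 400 is illustrative, not a documented default):

```xml
<max_database_num_to_throw>400</max_database_num_to_throw>
```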
@@ -255,6 +255,7 @@
    M(PartsActive, "Active data part, used by current and upcoming SELECTs.") \
    M(AttachedDatabase, "Active databases.") \
    M(AttachedTable, "Active tables.") \
    M(AttachedReplicatedTable, "Active replicated tables.") \
    M(AttachedView, "Active views.") \
    M(AttachedDictionary, "Active dictionaries.") \
    M(PartsOutdated, "Not active data part, but could be used by only current SELECTs, could be deleted after SELECTs finishes.") \
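The new `AttachedReplicatedTable` counter, like the existing `Attached*` metrics, is what the limit checks below read. A minimal sketch for inspecting these counters on a running server, assuming the standard `system.metrics` table:

```sql
-- Current counts of attached objects that the max_*_num_to_throw limits compare against.
SELECT metric, value
FROM system.metrics
WHERE metric IN ('AttachedDatabase', 'AttachedTable', 'AttachedReplicatedTable',
                 'AttachedView', 'AttachedDictionary');
```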
@@ -131,6 +131,9 @@ namespace DB
    DECLARE(UInt64, max_database_num_to_warn, 1000lu, "If the number of databases is greater than this value, the server will create a warning that will displayed to user.", 0) \
    DECLARE(UInt64, max_part_num_to_warn, 100000lu, "If the number of parts is greater than this value, the server will create a warning that will displayed to user.", 0) \
    DECLARE(UInt64, max_table_num_to_throw, 0lu, "If number of tables is greater than this value, server will throw an exception. 0 means no limitation. View, remote tables, dictionary, system tables are not counted. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
    DECLARE(UInt64, max_replicated_table_num_to_throw, 0lu, "If number of replicated tables is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
    DECLARE(UInt64, max_dictionary_num_to_throw, 0lu, "If number of dictionaries is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
    DECLARE(UInt64, max_view_num_to_throw, 0lu, "If number of views is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
    DECLARE(UInt64, max_database_num_to_throw, 0lu, "If number of databases is greater than this value, server will throw an exception. 0 means no limitation.", 0) \
    DECLARE(UInt64, max_authentication_methods_per_user, 100, "The maximum number of authentication methods a user can be created with or altered. Changing this setting does not affect existing users. Zero means unlimited", 0) \
    DECLARE(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent thread can be allocated before applying CPU pressure. Zero means unlimited.", 0) \
@@ -382,7 +382,8 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
    if (!table_storage->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
    {
        LOG_TEST(log, "Counting detached table {} to database {}", table_name, database_name);
        CurrentMetrics::sub(getAttachedCounterForStorage(table_storage));
        for (auto metric : getAttachedCountersForStorage(table_storage))
            CurrentMetrics::sub(metric);
    }

    auto table_id = table_storage->getStorageID();
@@ -430,7 +431,8 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
    if (!table->isSystemStorage() && !DatabaseCatalog::isPredefinedDatabase(database_name))
    {
        LOG_TEST(log, "Counting attached table {} to database {}", table_name, database_name);
        CurrentMetrics::add(getAttachedCounterForStorage(table));
        for (auto metric : getAttachedCountersForStorage(table))
            CurrentMetrics::add(metric);
    }
}
@@ -98,6 +98,9 @@
namespace CurrentMetrics
{
    extern const Metric AttachedTable;
    extern const Metric AttachedReplicatedTable;
    extern const Metric AttachedDictionary;
    extern const Metric AttachedView;
}

namespace DB
@@ -145,7 +148,10 @@ namespace ServerSetting
{
    extern const ServerSettingsBool ignore_empty_sql_security_in_create_view_query;
    extern const ServerSettingsUInt64 max_database_num_to_throw;
    extern const ServerSettingsUInt64 max_dictionary_num_to_throw;
    extern const ServerSettingsUInt64 max_table_num_to_throw;
    extern const ServerSettingsUInt64 max_replicated_table_num_to_throw;
    extern const ServerSettingsUInt64 max_view_num_to_throw;
}

namespace ErrorCodes
@@ -1912,16 +1918,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
        }
    }

    UInt64 table_num_limit = getContext()->getGlobalContext()->getServerSettings()[ServerSetting::max_table_num_to_throw];
    if (table_num_limit > 0 && !internal)
    {
        UInt64 table_count = CurrentMetrics::get(CurrentMetrics::AttachedTable);
        if (table_count >= table_num_limit)
            throw Exception(ErrorCodes::TOO_MANY_TABLES,
                "Too many tables. "
                "The limit (server configuration parameter `max_table_num_to_throw`) is set to {}, the current number of tables is {}",
                table_num_limit, table_count);
    }
    if (!internal)
        throwIfTooManyEntities(create, res);

    database->createTable(getContext(), create.getTable(), res, query_ptr);
@@ -1948,6 +1946,30 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
}


void InterpreterCreateQuery::throwIfTooManyEntities(ASTCreateQuery & create, StoragePtr storage) const
{
    auto check_and_throw = [&](auto setting, CurrentMetrics::Metric metric, String setting_name, String entity_name)
    {
        UInt64 num_limit = getContext()->getGlobalContext()->getServerSettings()[setting];
        UInt64 attached_count = CurrentMetrics::get(metric);
        if (num_limit > 0 && attached_count >= num_limit)
            throw Exception(ErrorCodes::TOO_MANY_TABLES,
                "Too many {}. "
                "The limit (server configuration parameter `{}`) is set to {}, the current number is {}",
                entity_name, setting_name, num_limit, attached_count);
    };

    if (auto * replicated_storage = typeid_cast<StorageReplicatedMergeTree *>(storage.get()))
        check_and_throw(ServerSetting::max_replicated_table_num_to_throw, CurrentMetrics::AttachedReplicatedTable, "max_replicated_table_num_to_throw", "replicated tables");
    else if (create.is_dictionary)
        check_and_throw(ServerSetting::max_dictionary_num_to_throw, CurrentMetrics::AttachedDictionary, "max_dictionary_num_to_throw", "dictionaries");
    else if (create.isView())
        check_and_throw(ServerSetting::max_view_num_to_throw, CurrentMetrics::AttachedView, "max_view_num_to_throw", "views");
    else
        check_and_throw(ServerSetting::max_table_num_to_throw, CurrentMetrics::AttachedTable, "max_table_num_to_throw", "tables");
}


BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
    const InterpreterCreateQuery::TableProperties & properties, LoadingStrictnessLevel mode)
{
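From the SQL side, the check added above surfaces as a TOO_MANY_TABLES error on CREATE. A minimal sketch, assuming the server is configured with `max_replicated_table_num_to_throw` set to 3 and three replicated tables are already attached (the table name and limit value are illustrative):

```sql
-- A fourth replicated table exceeds the configured limit of 3.
CREATE TABLE t4 (a Int32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/t4', '{replica}')
ORDER BY a;
-- Expected failure: Too many replicated tables. The limit (server configuration parameter
-- `max_replicated_table_num_to_throw`) is set to 3, the current number is 3
```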
@@ -122,6 +122,8 @@ private:

    BlockIO executeQueryOnCluster(ASTCreateQuery & create);

    void throwIfTooManyEntities(ASTCreateQuery & create, StoragePtr storage) const;

    ASTPtr query_ptr;

    /// Skip safety threshold when loading tables.
@@ -1,10 +1,13 @@
#include <vector>
#include <Storages/Utils.h>
#include <Storages/IStorage.h>
#include <Storages/StorageReplicatedMergeTree.h>


namespace CurrentMetrics
{
    extern const Metric AttachedTable;
    extern const Metric AttachedReplicatedTable;
    extern const Metric AttachedView;
    extern const Metric AttachedDictionary;
}
@@ -12,17 +15,20 @@ namespace CurrentMetrics

namespace DB
{
    CurrentMetrics::Metric getAttachedCounterForStorage(const StoragePtr & storage)
    std::vector<CurrentMetrics::Metric> getAttachedCountersForStorage(const StoragePtr & storage)
    {
        if (storage->isView())
        {
            return CurrentMetrics::AttachedView;
            return {CurrentMetrics::AttachedView};
        }
        if (storage->isDictionary())
        {
            return CurrentMetrics::AttachedDictionary;
            return {CurrentMetrics::AttachedDictionary};
        }

        return CurrentMetrics::AttachedTable;
        if (auto * replicated_storage = typeid_cast<StorageReplicatedMergeTree *>(storage.get()))
        {
            return {CurrentMetrics::AttachedTable, CurrentMetrics::AttachedReplicatedTable};
        }
        return {CurrentMetrics::AttachedTable};
    }
}
@@ -6,5 +6,5 @@

namespace DB
{
    CurrentMetrics::Metric getAttachedCounterForStorage(const StoragePtr & storage);
    std::vector<CurrentMetrics::Metric> getAttachedCountersForStorage(const StoragePtr & storage);
}
@@ -1,4 +1,20 @@
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <replica>
                    <host>node1</host>
                    <port>9000</port>
                </replica>
                <replica>
                    <host>node2</host>
                    <port>9000</port>
                </replica>
            </shard>
        </cluster>
    </remote_servers>

    <max_dictionary_num_to_throw>10</max_dictionary_num_to_throw>
    <max_table_num_to_throw>10</max_table_num_to_throw>
    <max_database_num_to_throw>10</max_database_num_to_throw>
</clickhouse>
@@ -0,0 +1,4 @@
<clickhouse>
    <max_replicated_table_num_to_throw>5</max_replicated_table_num_to_throw>
</clickhouse>
@@ -0,0 +1,4 @@
<clickhouse>
    <max_replicated_table_num_to_throw>3</max_replicated_table_num_to_throw>
</clickhouse>
@@ -1,11 +1,22 @@
import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

node = cluster.add_instance("node", main_configs=["config/config.xml"])
node = cluster.add_instance(
    "node1",
    with_zookeeper=True,
    macros={"replica": "r1"},
    main_configs=["config/config.xml", "config/config1.xml"],
)

node2 = cluster.add_instance(
    "node2",
    with_zookeeper=True,
    macros={"replica": "r2"},
    main_configs=["config/config.xml", "config/config2.xml"],
)


@pytest.fixture(scope="module")
@@ -24,10 +35,9 @@ def test_table_db_limit(started_cluster):
    for i in range(9):
        node.query("create database db{}".format(i))

    with pytest.raises(QueryRuntimeException) as exp_info:
        node.query("create database db_exp".format(i))

    assert "TOO_MANY_DATABASES" in str(exp_info)
    assert "TOO_MANY_DATABASES" in node.query_and_get_error(
        "create database db_exp".format(i)
    )

    for i in range(10):
        node.query("create table t{} (a Int32) Engine = Log".format(i))
|
||||
# This checks that system tables are not accounted in the number of tables.
|
||||
node.query("system flush logs")
|
||||
|
||||
# Regular tables
|
||||
for i in range(10):
|
||||
node.query("drop table t{}".format(i))
|
||||
|
||||
for i in range(10):
|
||||
node.query("create table t{} (a Int32) Engine = Log".format(i))
|
||||
|
||||
with pytest.raises(QueryRuntimeException) as exp_info:
|
||||
node.query("create table default.tx (a Int32) Engine = Log")
|
||||
assert "TOO_MANY_TABLES" in node.query_and_get_error(
|
||||
"create table default.tx (a Int32) Engine = Log"
|
||||
)
|
||||
|
||||
assert "TOO_MANY_TABLES" in str(exp_info)
|
||||
# Dictionaries
|
||||
for i in range(10):
|
||||
node.query(
|
||||
"create dictionary d{} (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)".format(
|
||||
i
|
||||
)
|
||||
)
|
||||
|
||||
assert "TOO_MANY_TABLES" in node.query_and_get_error(
|
||||
"create dictionary dx (a Int32) primary key a source(null()) layout(flat()) lifetime(1000)"
|
||||
)
|
||||
|
||||
# Replicated tables
|
||||
for i in range(10):
|
||||
node.query("drop table t{}".format(i))
|
||||
|
||||
for i in range(3):
|
||||
node.query(
|
||||
"create table t{} on cluster 'cluster' (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format(
|
||||
i, i
|
||||
)
|
||||
)
|
||||
|
||||
# Test limit on other replica
|
||||
assert "Too many replicated tables" in node2.query_and_get_error(
|
||||
"create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a"
|
||||
)
|
||||
|
||||
for i in range(3, 5):
|
||||
node.query(
|
||||
"create table t{} (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/t{}', '{{replica}}') order by a".format(
|
||||
i, i
|
||||
)
|
||||
)
|
||||
|
||||
assert "Too many replicated tables" in node.query_and_get_error(
|
||||
"create table tx (a Int32) Engine = ReplicatedMergeTree('/clickhouse/tables/tx', '{replica}') order by a"
|
||||
)
|
||||
|
||||
# Checks that replicated tables are also counted as regular tables
|
||||
for i in range(5, 10):
|
||||
node.query("create table t{} (a Int32) Engine = Log".format(i))
|
||||
|
||||
assert "TOO_MANY_TABLES" in node.query_and_get_error(
|
||||
"create table tx (a Int32) Engine = Log"
|
||||
)
|
||||
|
||||
# Cleanup
|
||||
for i in range(10):
|
||||
node.query("drop table t{} sync".format(i))
|
||||
for i in range(3):
|
||||
node2.query("drop table t{} sync".format(i))
|
||||
node.query("system drop replica 'r1' from ZKPATH '/clickhouse/tables/tx'")
|
||||
node.query("system drop replica 'r2' from ZKPATH '/clickhouse/tables/tx'")
|
||||
for i in range(9):
|
||||
node.query("drop database db{}".format(i))
|
||||
for i in range(10):
|
||||
node.query("drop dictionary d{}".format(i))
|
||||
|