Merge pull request #2856 from zhang2014/feature/add_check_access_database_for_system_tables

Add check access database for system tables
Merged by alexey-milovidov on 2018-08-14 12:53:43 +03:00 (committed via GitHub), commit f42bd36fa0.
13 changed files with 114 additions and 37 deletions


@@ -618,6 +618,13 @@ void Context::checkDatabaseAccessRights(const std::string & database_name) const
    checkDatabaseAccessRightsImpl(database_name);
}

+bool Context::hasDatabaseAccessRights(const String & database_name) const
+{
+    auto lock = getLock();
+    return client_info.current_user.empty() || (database_name == "system") ||
+        shared->security_manager->hasAccessToDatabase(client_info.current_user, database_name);
+}
+
void Context::checkDatabaseAccessRightsImpl(const std::string & database_name) const
{
    if (client_info.current_user.empty() || (database_name == "system"))
@@ -1793,6 +1800,7 @@ std::shared_ptr<ActionLocksManager> Context::getActionLocksManager()
return shared->action_locks_manager;
}
SessionCleaner::~SessionCleaner()
{
try
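The check added above reduces to a three-way short-circuit: queries with no authenticated user and the system database itself are always visible, everything else is delegated to the security manager. A minimal standalone model of that decision (plain C++, not the ClickHouse sources; the callback stands in for shared->security_manager->hasAccessToDatabase):

#include <functional>
#include <string>

/// Callback standing in for SecurityManager::hasAccessToDatabase(user, database).
using SecurityManagerCheck = std::function<bool(const std::string &, const std::string &)>;

/// Mirrors the return expression of Context::hasDatabaseAccessRights above.
bool modelHasDatabaseAccessRights(const std::string & current_user, const std::string & database_name,
                                  const SecurityManagerCheck & has_access_to_database)
{
    return current_user.empty()                     /// no user set: allow
        || database_name == "system"                /// the system database stays visible
        || has_access_to_database(current_user, database_name);
}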


@@ -180,6 +180,7 @@ public:
    bool isTableExist(const String & database_name, const String & table_name) const;
    bool isDatabaseExist(const String & database_name) const;
    bool isExternalTableExist(const String & table_name) const;
+    bool hasDatabaseAccessRights(const String & database_name) const;
    void assertTableExists(const String & database_name, const String & table_name) const;

    /** The parameter check_database_access_rights exists to not check the permissions of the database again,


@@ -42,7 +42,11 @@ void StorageSystemColumns::fillData(MutableColumns & res_columns, const Context
    /// Add `database` column.
    MutableColumnPtr database_column_mut = ColumnString::create();
    for (const auto & database : databases)
-        database_column_mut->insert(database.first);
+    {
+        if (context.hasDatabaseAccessRights(database.first))
+            database_column_mut->insert(database.first);
+    }
    block_to_filter.insert(ColumnWithTypeAndName(std::move(database_column_mut), std::make_shared<DataTypeString>(), "database"));

    /// Filter block with `database` column.
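The same guard recurs in each of the system storages touched by this change: enumerate everything, but only emit rows for databases the current user may access. A standalone sketch of that filtering step (illustrative names and plain STL containers rather than the ClickHouse column classes):

#include <functional>
#include <string>
#include <vector>

/// Keep only the database names the current user is allowed to see;
/// has_access mirrors context.hasDatabaseAccessRights(name).
std::vector<std::string> visibleDatabases(const std::vector<std::string> & all_databases,
                                          const std::function<bool(const std::string &)> & has_access)
{
    std::vector<std::string> visible;
    for (const auto & name : all_databases)
        if (has_access(name))
            visible.push_back(name);
    return visible;
}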


@@ -22,10 +22,13 @@ void StorageSystemDatabases::fillData(MutableColumns & res_columns, const Contex
    auto databases = context.getDatabases();
    for (const auto & database : databases)
    {
-        res_columns[0]->insert(database.first);
-        res_columns[1]->insert(database.second->getEngineName());
-        res_columns[2]->insert(database.second->getDataPath());
-        res_columns[3]->insert(database.second->getMetadataPath());
+        if (context.hasDatabaseAccessRights(database.first))
+        {
+            res_columns[0]->insert(database.first);
+            res_columns[1]->insert(database.second->getEngineName());
+            res_columns[2]->insert(database.second->getDataPath());
+            res_columns[3]->insert(database.second->getMetadataPath());
+        }
    }
}


@@ -33,23 +33,26 @@ void StorageSystemMerges::fillData(MutableColumns & res_columns, const Context &
{
    for (const auto & merge : context.getMergeList().get())
    {
-        size_t i = 0;
-        res_columns[i++]->insert(merge.database);
-        res_columns[i++]->insert(merge.table);
-        res_columns[i++]->insert(merge.elapsed);
-        res_columns[i++]->insert(merge.progress);
-        res_columns[i++]->insert(merge.num_parts);
-        res_columns[i++]->insert(merge.source_part_names);
-        res_columns[i++]->insert(merge.result_part_name);
-        res_columns[i++]->insert(merge.total_size_bytes_compressed);
-        res_columns[i++]->insert(merge.total_size_marks);
-        res_columns[i++]->insert(merge.bytes_read_uncompressed);
-        res_columns[i++]->insert(merge.rows_read);
-        res_columns[i++]->insert(merge.bytes_written_uncompressed);
-        res_columns[i++]->insert(merge.rows_written);
-        res_columns[i++]->insert(merge.columns_written);
-        res_columns[i++]->insert(merge.memory_usage);
-        res_columns[i++]->insert(merge.thread_number);
+        if (context.hasDatabaseAccessRights(merge.database))
+        {
+            size_t i = 0;
+            res_columns[i++]->insert(merge.database);
+            res_columns[i++]->insert(merge.table);
+            res_columns[i++]->insert(merge.elapsed);
+            res_columns[i++]->insert(merge.progress);
+            res_columns[i++]->insert(merge.num_parts);
+            res_columns[i++]->insert(merge.source_part_names);
+            res_columns[i++]->insert(merge.result_part_name);
+            res_columns[i++]->insert(merge.total_size_bytes_compressed);
+            res_columns[i++]->insert(merge.total_size_marks);
+            res_columns[i++]->insert(merge.bytes_read_uncompressed);
+            res_columns[i++]->insert(merge.rows_read);
+            res_columns[i++]->insert(merge.bytes_written_uncompressed);
+            res_columns[i++]->insert(merge.rows_written);
+            res_columns[i++]->insert(merge.columns_written);
+            res_columns[i++]->insert(merge.memory_usage);
+            res_columns[i++]->insert(merge.thread_number);
+        }
    }
}


@@ -38,12 +38,15 @@ void StorageSystemMutations::fillData(MutableColumns & res_columns, const Contex
    std::map<String, std::map<String, StoragePtr>> merge_tree_tables;
    for (const auto & db : context.getDatabases())
    {
-        for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
+        if (context.hasDatabaseAccessRights(db.first))
        {
-            if (dynamic_cast<const StorageMergeTree *>(iterator->table().get())
-                || dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
+            for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
            {
-                merge_tree_tables[db.first][iterator->name()] = iterator->table();
+                if (dynamic_cast<const StorageMergeTree *>(iterator->table().get())
+                    || dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
+                {
+                    merge_tree_tables[db.first][iterator->name()] = iterator->table();
+                }
            }
        }
    }


@@ -59,7 +59,10 @@ public:
        /// Add column 'database'.
        MutableColumnPtr database_column_mut = ColumnString::create();
        for (const auto & database : databases)
-            database_column_mut->insert(database.first);
+        {
+            if (context.hasDatabaseAccessRights(database.first))
+                database_column_mut->insert(database.first);
+        }
        block_to_filter.insert(ColumnWithTypeAndName(
            std::move(database_column_mut), std::make_shared<DataTypeString>(), "database"));


@@ -65,9 +65,15 @@ BlockInputStreams StorageSystemReplicas::read(
    /// We collect a set of replicated tables.
    std::map<String, std::map<String, StoragePtr>> replicated_tables;
    for (const auto & db : context.getDatabases())
-        for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
-            if (dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
-                replicated_tables[db.first][iterator->name()] = iterator->table();
+    {
+        if (context.hasDatabaseAccessRights(db.first))
+        {
+            for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
+                if (dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
+                    replicated_tables[db.first][iterator->name()] = iterator->table();
+        }
+    }

    /// Do you need columns that require a walkthrough in ZooKeeper to compute.
    bool with_zk_fields = false;


@@ -50,9 +50,15 @@ void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, const
{
    std::map<String, std::map<String, StoragePtr>> replicated_tables;
    for (const auto & db : context.getDatabases())
-        for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
-            if (dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
-                replicated_tables[db.first][iterator->name()] = iterator->table();
+    {
+        if (context.hasDatabaseAccessRights(db.first))
+        {
+            for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
+                if (dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
+                    replicated_tables[db.first][iterator->name()] = iterator->table();
+        }
+    }

    MutableColumnPtr col_database_mut = ColumnString::create();
    MutableColumnPtr col_table_mut = ColumnString::create();


@@ -95,7 +95,7 @@ BlockInputStreams StorageSystemTables::read(
        auto database = context.tryGetDatabase(database_name);
-        if (!database)
+        if (!database || !context.hasDatabaseAccessRights(database_name))
        {
            /// Database was deleted just now.
            continue;


@@ -11,11 +11,11 @@ class Client:
        self.command = [command, '--host', self.host, '--port', str(self.port), '--stacktrace']

-    def query(self, sql, stdin=None, timeout=None, settings=None):
-        return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings).get_answer()
+    def query(self, sql, stdin=None, timeout=None, settings=None, user=None):
+        return self.get_query_request(sql, stdin=stdin, timeout=timeout, settings=settings, user=user).get_answer()

-    def get_query_request(self, sql, stdin=None, timeout=None, settings=None):
+    def get_query_request(self, sql, stdin=None, timeout=None, settings=None, user=None):
        command = self.command[:]
        if stdin is None:
@@ -28,6 +28,9 @@ class Client:
            for setting, value in settings.iteritems():
                command += ['--' + setting, str(value)]

+        if user is not None:
+            command += ['--user', user]
+
        return CommandRequest(command, stdin, timeout)


@@ -0,0 +1,20 @@
+<yandex>
+    <users>
+        <default>
+            <password></password>
+            <profile>default</profile>
+            <quota>default</quota>
+        </default>
+        <test_allow>
+            <password></password>
+            <profile>default</profile>
+            <quota>default</quota>
+            <networks>
+                <ip>::/0</ip>
+            </networks>
+            <allow_databases>
+                <database>default</database>
+            </allow_databases>
+        </test_allow>
+    </users>
+</yandex>
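In terms of the model above, this configuration limits test_allow to the default database, while the default user, which declares no allow_databases section, stays unrestricted. A small self-contained check of what the integration test below expects (treating an absent allow-list as "all databases" is an assumption here):

#include <cassert>
#include <map>
#include <set>
#include <string>

int main()
{
    /// Per-user allow-list as declared in the XML above (hypothetical in-memory form).
    const std::map<std::string, std::set<std::string>> allow_databases = {{"test_allow", {"default"}}};

    auto user_can_see = [&](const std::string & user, const std::string & database)
    {
        auto it = allow_databases.find(user);
        return it == allow_databases.end() || it->second.count(database) > 0;
    };

    assert(user_can_see("default", "db1"));        /// no allow-list configured: everything visible
    assert(!user_can_see("test_allow", "db1"));    /// db1 is filtered out of the system tables
    assert(user_can_see("test_allow", "default"));
    return 0;
}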


@@ -8,6 +8,7 @@ node1 = cluster.add_instance('node1', main_configs=['configs/config_no_substs.xm
node2 = cluster.add_instance('node2', main_configs=['configs/config_env.xml'], env_variables={"MAX_QUERY_SIZE": "55555"})
node3 = cluster.add_instance('node3', main_configs=['configs/config_zk.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', main_configs=['configs/config_incl.xml', 'configs/max_query_size.xml']) # include value 77777
+node5 = cluster.add_instance('node5', main_configs=['configs/config_allow_databases.xml'])
@pytest.fixture(scope="module")
def start_cluster():
@@ -26,3 +27,19 @@ def test_config(start_cluster):
    assert node2.query("select value from system.settings where name = 'max_query_size'") == "55555\n"
    assert node3.query("select value from system.settings where name = 'max_query_size'") == "77777\n"
    assert node4.query("select value from system.settings where name = 'max_query_size'") == "99999\n"
+
+def test_allow_databases(start_cluster):
+    node5.query("CREATE DATABASE db1")
+    node5.query("CREATE TABLE db1.test_table(date Date, k1 String, v1 Int32) ENGINE = MergeTree(date, (k1, date), 8192)")
+    node5.query("INSERT INTO db1.test_table VALUES('2000-01-01', 'test_key', 1)")
+    assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'") == "db1\n"
+    assert node5.query("SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table' ") == "test_table\n"
+    assert node5.query("SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'") == "date\nk1\nv1\n"
+    assert node5.query("SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n"
+    assert node5.query("SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'") == "20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n20000101_20000101_1_1_0\n"
+
+    assert node5.query("SELECT name FROM system.databases WHERE name = 'db1'", user="test_allow") == "\n"
+    assert node5.query("SELECT name FROM system.tables WHERE database = 'db1' AND name = 'test_table'", user="test_allow") == "\n"
+    assert node5.query("SELECT name FROM system.columns WHERE database = 'db1' AND table = 'test_table'", user="test_allow") == "\n"
+    assert node5.query("SELECT name FROM system.parts WHERE database = 'db1' AND table = 'test_table'", user="test_allow") == "\n"
+    assert node5.query("SELECT name FROM system.parts_columns WHERE database = 'db1' AND table = 'test_table'", user="test_allow") == "\n"