Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 01:51:59 +00:00)

Merge branch 'master' into fix-misleading-naming-in-joins

Commit 7c0e50c4d5
@@ -76,7 +76,8 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
| sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
-> /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+> /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
@@ -54,7 +54,7 @@ $ sudo mysql

``` sql
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
```

Then configure the connection in `/etc/odbc.ini`.
@@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
SERVER = 127.0.0.1
PORT = 3306
DATABASE = test
-USERNAME = clickhouse
+USER = clickhouse
PASSWORD = clickhouse
```
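Once the DSN is configured, the same connection can also be checked from the ClickHouse side. A hypothetical session using the `odbc` table function (arguments: connection string, external database, external table), reading the `test.test` table created further below:

```sql
-- Reads MySQL data through the ODBC driver configured above;
-- 'mysqlconn' is the DSN from /etc/odbc.ini.
SELECT * FROM odbc('DSN=mysqlconn', 'test', 'test');
```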
@@ -83,6 +83,9 @@ $ isql -v mysqlconn

Table in MySQL:

``` text
mysql> CREATE DATABASE test;
Query OK, 1 row affected (0,01 sec)

mysql> CREATE TABLE `test`.`test` (
    -> `int_id` INT NOT NULL AUTO_INCREMENT,
    -> `int_nullable` INT NULL DEFAULT NULL,
@@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
    -> PRIMARY KEY (`int_id`));
Query OK, 0 rows affected (0,09 sec)

-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)

-mysql> select * from test;
+mysql> select * from test.test;
+------+----------+-----+----------+
| int_id | int_nullable | float | float_nullable |
+------+----------+-----+----------+
@@ -3201,6 +3201,40 @@ ENGINE = Log
└──────────────────────────────────────────────────────────────────────────┘
```

## default_temporary_table_engine {#default_temporary_table_engine}

Same as [default_table_engine](#default_table_engine) but for temporary tables.

Default value: `Memory`.

In this example, any new temporary table that does not specify an `Engine` will use the `Log` table engine:

Query:

```sql
SET default_temporary_table_engine = 'Log';

CREATE TEMPORARY TABLE my_table (
    x UInt32,
    y UInt32
);

SHOW CREATE TEMPORARY TABLE my_table;
```

Result:

```response
┌─statement────────────────────────────────────────────────────────────────┐
│ CREATE TEMPORARY TABLE default.my_table
(
    `x` UInt32,
    `y` UInt32
)
ENGINE = Log
└──────────────────────────────────────────────────────────────────────────┘
```

## data_type_default_nullable {#data_type_default_nullable}

Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition to be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
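A minimal sketch of that setting's effect (hypothetical session; the table name is illustrative):

```sql
SET data_type_default_nullable = 1;

-- No explicit NULL/NOT NULL modifier, so the column becomes Nullable(Int32).
CREATE TABLE nullable_by_default (x Int32) ENGINE = Memory;
SHOW CREATE TABLE nullable_by_default;
```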
@@ -9,7 +9,6 @@ Columns:

- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.

@@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
```

``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0                         │    2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy                │       743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty                │     26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs      │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained                         │  60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped                           │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident                         │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata                         │  12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated                        │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0                         │    2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy                │       743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty                │     26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs      │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained                         │  60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped                           │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident                         │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata                         │  12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated                        │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
```

**See Also**
@@ -8,7 +8,6 @@ slug: /ru/operations/system-tables/asynchronous_metric_log

Столбцы:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата события.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время события.
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время события в микросекундах.
- `name` ([String](../../sql-reference/data-types/string.md)) — название метрики.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
@@ -8,7 +8,6 @@ slug: /zh/operations/system-tables/asynchronous_metric_log

列:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件日期。
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 事件时间。
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件时间(微秒)。
- `name` ([String](../../sql-reference/data-types/string.md)) — 指标名。
- `value` ([Float64](../../sql-reference/data-types/float.md)) — 指标值。

@@ -17,18 +16,18 @@ slug: /zh/operations/system-tables/asynchronous_metric_log
SELECT * FROM system.asynchronous_metric_log LIMIT 10
```

``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0                         │    2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy                │       743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty                │     26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs      │         0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained                         │  60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped                           │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident                         │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata                         │  12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated                        │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0                         │    2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy                │       743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty                │     26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs      │         0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained                         │  60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped                           │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident                         │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata                         │  12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated                        │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
```

**另请参阅**
@@ -192,7 +192,7 @@ SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook

**返回值**

- 如果`x`不为`NULL`,返回非`Nullable`类型的原始值。
-- 如果`x`为`NULL`,返回对应非`Nullable`类型的默认值。
+- 如果`x`为`NULL`,则返回任意值。

**示例**
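In English, the corrected bullet says: if `x` is `NULL`, an arbitrary value is returned (rather than the default value of the corresponding non-`Nullable` type). Judging by the surrounding context this appears to be the `assumeNotNull` documentation; a hedged illustration of why the wording matters:

```sql
-- The result for a NULL input is unspecified; in particular it is
-- NOT guaranteed to be the type's default value (0 for Int32).
SELECT assumeNotNull(CAST(NULL, 'Nullable(Int32)'));
```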
@@ -8,7 +8,9 @@
#include <Poco/Logger.h>
#include <Poco/NullChannel.h>
#include <Poco/SimpleFileChannel.h>
+#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseMemory.h>
+#include <Databases/DatabasesOverlay.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>
#include <Interpreters/DatabaseCatalog.h>
@@ -50,6 +52,8 @@
#include <base/argsToConfig.h>
#include <filesystem>

#include "config.h"

#if defined(FUZZING_MODE)
#include <Functions/getFuzzerData.h>
#endif
@@ -170,6 +174,13 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
    return system_database;
}

+static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
+{
+    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
+    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
+    return databaseCombiner;
+}

/// If path is specified and not empty, will try to setup server environment and load existing metadata
void LocalServer::tryInitPath()
@@ -669,7 +680,7 @@ void LocalServer::processConfig()
     * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
     */
    std::string default_database = config().getString("default_database", "_local");
-    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, global_context));
+    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
    global_context->setCurrentDatabase(default_database);
    applyCmdOptions(global_context);
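The overlay changes what name resolution means inside clickhouse-local: assuming DatabasesOverlay consults its registered databases in order, a name is first tried as a file path via DatabaseFilesystem and then as an in-memory table. A hypothetical session (file name illustrative):

```sql
-- Run inside clickhouse-local: `data.csv` is not an in-memory table, so it
-- falls through to DatabaseFilesystem and is wrapped by the file() table function.
SELECT count() FROM `data.csv`;
```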
@@ -12,7 +12,8 @@
    --chart-background: white;
    --shadow-color: rgba(0, 0, 0, 0.25);
    --input-shadow-color: rgba(0, 255, 0, 1);
-    --error-color: white;
+    --error-color: red;
+    --auth-error-color: white;
    --legend-background: rgba(255, 255, 255, 0.75);
    --title-color: #666;
    --text-color: black;
@@ -258,7 +259,7 @@
    width: 60%;
    padding: .5rem;

-    color: var(--error-color);
+    color: var(--auth-error-color);

    display: flex;
    flex-flow: row nowrap;
@@ -906,9 +907,9 @@ async function draw(idx, chart, url_params, query) {

    if (error) {
        const errorMatch = errorMessages.find(({ regex }) => error.match(regex))
        if (errorMatch) {
            const match = error.match(errorMatch.regex)
            const message = errorMatch.messageFunc(match)
            if (message) {
                const authError = new Error(message)
                throw authError
            }
        }
@@ -1019,13 +1020,15 @@ async function drawAll() {
        firstLoad = false;
    } else {
        enableReloadButton();
+        enableRunButton();
    }
-    if (!results.includes(false)) {
+    if (results.includes(true)) {
        const element = document.querySelector('.inputs');
        element.classList.remove('unconnected');
        const add = document.querySelector('#add');
        add.style.display = 'block';
-    } else {
    }
+    else {
        const charts = document.querySelector('#charts')
        charts.style.height = '0px';
    }
@@ -1050,6 +1053,13 @@ function disableReloadButton() {
    reloadButton.classList.add('disabled')
}

+function disableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Reloading...'
+    runButton.disabled = true
+    runButton.classList.add('disabled')
+}

function enableReloadButton() {
    const reloadButton = document.getElementById('reload')
    reloadButton.value = 'Reload'
@@ -1057,11 +1067,19 @@ function enableReloadButton() {
    reloadButton.classList.remove('disabled')
}

+function enableRunButton() {
+    const runButton = document.getElementById('run')
+    runButton.value = 'Ok'
+    runButton.disabled = false
+    runButton.classList.remove('disabled')
+}

function reloadAll() {
    updateParams();
    drawAll();
    saveState();
-    disableReloadButton()
+    disableReloadButton();
+    disableRunButton();
}

document.getElementById('params').onsubmit = function(event) {
@@ -155,7 +155,7 @@ namespace


AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_)
-    : access_flags(access_flags_), database(database_), any_database(false)
+    : access_flags(access_flags_), database(database_), parameter(database_), any_database(false), any_parameter(false)
{
}
@@ -70,7 +70,7 @@ enum class AccessType
    M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
    \
    M(ALTER_DATABASE_SETTINGS, "ALTER DATABASE SETTING, ALTER MODIFY DATABASE SETTING, MODIFY DATABASE SETTING", DATABASE, ALTER_DATABASE) /* allows to execute ALTER MODIFY SETTING */\
-    M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute ALTER NAMED COLLECTION */\
+    M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute ALTER NAMED COLLECTION */\
    \
    M(ALTER_TABLE, "", GROUP, ALTER) \
    M(ALTER_DATABASE, "", GROUP, ALTER) \
@@ -92,7 +92,7 @@ enum class AccessType
    M(CREATE_ARBITRARY_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables
                                                               with arbitrary table engine */\
    M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \
-    M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute CREATE NAMED COLLECTION */ \
+    M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute CREATE NAMED COLLECTION */ \
    M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
    \
    M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
@@ -101,7 +101,7 @@ enum class AccessType
                                           implicitly enabled by the grant DROP_TABLE */\
    M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
    M(DROP_FUNCTION, "", GLOBAL, DROP) /* allows to execute DROP FUNCTION */\
-    M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute DROP NAMED COLLECTION */\
+    M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute DROP NAMED COLLECTION */\
    M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
    \
    M(UNDROP_TABLE, "", TABLE, ALL) /* allows to execute {UNDROP} TABLE */\
@@ -140,9 +140,10 @@ enum class AccessType
    M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
    M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
    M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
-    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
-    M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
-    M(NAMED_COLLECTION_CONTROL, "", NAMED_COLLECTION, ALL) \
+    M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(NAMED_COLLECTION, "NAMED COLLECTION USAGE, USE NAMED COLLECTION", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+    M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
    \
    M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
    M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \
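In user-facing terms, the rename means the admin-level grant is now spelled `NAMED COLLECTION ADMIN` (with `NAMED COLLECTION CONTROL` kept as an alias), while plain usage access is `NAMED COLLECTION`. A sketch of the resulting grammar, mirroring the test expectation further below (user names illustrative):

```sql
-- Full administrative access over named collections in scope db1:
GRANT NAMED COLLECTION ADMIN ON db1 TO admin_user;

-- Usage-level access only:
GRANT NAMED COLLECTION ON db1 TO reader_user;
```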
@@ -328,7 +328,7 @@ namespace

        if (!named_collection_control)
        {
-            user->access.revoke(AccessType::NAMED_COLLECTION_CONTROL);
+            user->access.revoke(AccessType::NAMED_COLLECTION_ADMIN);
        }

        if (!show_named_collections_secrets)
@@ -53,7 +53,7 @@ TEST(AccessRights, Union)
    "SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
    "SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, "
    "SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
-    "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION CONTROL ON db1");
+    "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1");
}
@@ -2297,7 +2297,9 @@ void ClientBase::runInteractive()
    catch (const ErrnoException & e)
    {
        if (e.getErrno() != EEXIST)
-            throw;
+        {
+            std::cerr << getCurrentExceptionMessage(false) << '\n';
+        }
    }
}
@@ -60,7 +60,15 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
    quota_key = config.getString("quota_key", "");

    /// By default compression is disabled if address looks like localhost.
-    compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
+
+    /// Avoid DNS request if the host is "localhost".
+    /// If ClickHouse is run under QEMU-user with a binary for a different architecture,
+    /// and there are all listed startup dependency shared libraries available, but not the runtime dependencies of glibc,
+    /// the glibc cannot open "plugins" for DNS resolving, and the DNS resolution does not work.
+    /// At the same time, I want clickhouse-local to always work, regardless.
+    /// TODO: get rid of glibc, or replace getaddrinfo to c-ares.
+
+    compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHost(host)))
        ? Protocol::Compression::Enable : Protocol::Compression::Disable;

    timeouts = ConnectionTimeouts(
@@ -540,7 +540,7 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
        }
    }

-    return re2->Match(StringPieceType(subject, subject_size), 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
+    return re2->Match({subject, subject_size}, 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
}
}

@@ -585,9 +585,9 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
        return false;
    }

-    StringPieceType piece;
+    std::string_view piece;

-    if (!RegexType::PartialMatch(StringPieceType(subject, subject_size), *re2, &piece))
+    if (!RegexType::PartialMatch({subject, subject_size}, *re2, &piece))
        return false;
    else
    {
@@ -652,10 +652,10 @@ unsigned OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject
        return 0;
    }

-    DB::PODArrayWithStackMemory<StringPieceType, 128> pieces(limit);
+    DB::PODArrayWithStackMemory<std::string_view, 128> pieces(limit);

    if (!re2->Match(
-        StringPieceType(subject, subject_size),
+        {subject, subject_size},
        0,
        subject_size,
        RegexType::UNANCHORED,
@@ -52,7 +52,6 @@ public:
    using MatchVec = std::vector<Match>;

    using RegexType = std::conditional_t<thread_safe, re2::RE2, re2_st::RE2>;
-    using StringPieceType = std::conditional_t<thread_safe, re2::StringPiece, re2_st::StringPiece>;

    OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT
    /// StringSearcher store pointers to required_substring, it must be updated on move.
@@ -5,7 +5,6 @@
#include <atomic>

#include <re2/re2.h>
-#include <re2/stringpiece.h>

#include <Poco/Util/AbstractConfiguration.h>

@@ -44,7 +43,7 @@ private:
    const std::string regexp_string;

    const RE2 regexp;
-    const re2::StringPiece replacement;
+    const std::string_view replacement;

#ifndef NDEBUG
    mutable std::atomic<std::uint64_t> matches_count = 0;
@@ -27,7 +27,7 @@ static thread_local size_t max_stack_size = 0;
 * @param out_address - if not nullptr, here the address of the stack will be written.
 * @return stack size
 */
-size_t getStackSize(void ** out_address)
+static size_t getStackSize(void ** out_address)
{
    using namespace DB;

@@ -54,7 +54,15 @@ size_t getStackSize(void ** out_address)
        throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
# else
    if (0 != pthread_getattr_np(pthread_self(), &attr))
+    {
+        if (errno == ENOENT)
+        {
+            /// Most likely procfs is not mounted.
+            return 0;
+        }
+        else
            throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
+    }
# endif

    SCOPE_EXIT({ pthread_attr_destroy(&attr); });
@@ -83,6 +91,10 @@ __attribute__((__weak__)) void checkStackSize()
    if (!stack_address)
        max_stack_size = getStackSize(&stack_address);

+    /// The check is impossible.
+    if (!max_stack_size)
+        return;
+
    const void * frame_address = __builtin_frame_address(0);
    uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
    uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
@@ -3,7 +3,6 @@
#include <IO/ReadBufferFromString.h>
#include <IO/Operators.h>
#include <re2/re2.h>
-#include <re2/stringpiece.h>
#include <algorithm>
#include <sstream>
#include <iomanip>
@@ -33,14 +32,14 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob
    std::string escaped_with_globs = buf_for_escaping.str();

    static const re2::RE2 enum_or_range(R"({([\d]+\.\.[\d]+|[^{}*,]+,[^{}*]*[^{}*,])})"); /// regexp for {expr1,expr2,expr3} or {M..N}, where M and N - non-negative integers, expr's should be without "{", "}", "*" and ","
-    re2::StringPiece input(escaped_with_globs);
-    re2::StringPiece matched;
+    std::string_view input(escaped_with_globs);
+    std::string_view matched;
    std::ostringstream oss_for_replacing; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
    oss_for_replacing.exceptions(std::ios::failbit);
    size_t current_index = 0;
    while (RE2::FindAndConsume(&input, enum_or_range, &matched))
    {
-        std::string buffer{matched};
+        std::string buffer(matched);
        oss_for_replacing << escaped_with_globs.substr(current_index, matched.data() - escaped_with_globs.data() - current_index - 1) << '(';

        if (buffer.find(',') == std::string::npos)
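These are the glob patterns users write in the path arguments of the `file`, `s3`, and `hdfs` table functions; this helper turns them into a RE2 pattern. For example (paths illustrative):

```sql
-- {M..N} expands to a numeric range, {a,b} to alternatives:
SELECT * FROM file('data_{1..3}.csv');      -- data_1.csv, data_2.csv, data_3.csv
SELECT * FROM file('logs/{app,web}/*.tsv'); -- alternation combined with a wildcard
```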
@@ -517,6 +517,7 @@ class IColumn;
    M(Seconds, wait_for_window_view_fire_signal_timeout, 10, "Timeout for waiting for window view fire signal in event time processing", 0) \
    M(UInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.", 0) \
    \
+    M(DefaultTableEngine, default_temporary_table_engine, DefaultTableEngine::Memory, "Default table engine used when ENGINE is not set in CREATE TEMPORARY statement.",0) \
    M(DefaultTableEngine, default_table_engine, DefaultTableEngine::None, "Default table engine used when ENGINE is not set in CREATE statement.",0) \
    M(Bool, show_table_uuid_in_table_create_query_if_not_nil, false, "For tables in databases with Engine=Atomic show UUID of the table in its CREATE query.", 0) \
    M(Bool, database_atomic_wait_for_drop_and_detach_synchronously, false, "When executing DROP or DETACH TABLE in Atomic database, wait for table data to be finally dropped or detached.", 0) \
@@ -3,6 +3,7 @@
#include <filesystem>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabaseDictionary.h>
+#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseLazy.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseOrdinary.h>
@@ -47,6 +48,14 @@
#include <Databases/SQLite/DatabaseSQLite.h>
#endif

+#if USE_AWS_S3
+#include <Databases/DatabaseS3.h>
+#endif
+
+#if USE_HDFS
+#include <Databases/DatabaseHDFS.h>
+#endif
+
namespace fs = std::filesystem;

namespace DB
@@ -131,13 +140,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

    static const std::unordered_set<std::string_view> database_engines{"Ordinary", "Atomic", "Memory",
        "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL",
-        "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+        "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    if (!database_engines.contains(engine_name))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name);

    static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
-        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};

    static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
    bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);
@@ -432,6 +441,63 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
    }
#endif

+    else if (engine_name == "Filesystem")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        /// If init_path is empty, then the current path will be used
+        std::string init_path;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            if (engine->arguments->children.size() != 1)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path");
+
+            const auto & arguments = engine->arguments->children;
+            init_path = safeGetLiteralValue<String>(arguments[0], engine_name);
+        }
+
+        return std::make_shared<DatabaseFilesystem>(database_name, init_path, context);
+    }
+
+#if USE_AWS_S3
+    else if (engine_name == "S3")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        DatabaseS3::Configuration config;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            ASTs & engine_args = engine->arguments->children;
+            config = DatabaseS3::parseArguments(engine_args, context);
+        }
+
+        return std::make_shared<DatabaseS3>(database_name, config, context);
+    }
+#endif
+
+#if USE_HDFS
+    else if (engine_name == "HDFS")
+    {
+        const ASTFunction * engine = engine_define->engine;
+
+        /// If source_url is empty, then table name must contain full url
+        std::string source_url;
+
+        if (engine->arguments && !engine->arguments->children.empty())
+        {
+            if (engine->arguments->children.size() != 1)
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url");
+
+            const auto & arguments = engine->arguments->children;
+            source_url = safeGetLiteralValue<String>(arguments[0], engine_name);
+        }
+
+        return std::make_shared<DatabaseHDFS>(database_name, source_url, context);
+    }
+#endif
+
    throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name);
}
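Taken together, the factory now accepts three read-only database engines that lazily wrap the corresponding table functions. A sketch of the resulting DDL (endpoints, paths, and credentials are illustrative):

```sql
CREATE DATABASE fs      ENGINE = Filesystem('/var/lib/clickhouse/user_files');
CREATE DATABASE s3_db   ENGINE = S3('http://s3.example.com/bucket', 'NOSIGN');
CREATE DATABASE hdfs_db ENGINE = HDFS('hdfs://namenode:9000');
```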
src/Databases/DatabaseFilesystem.cpp (new file, 245 lines)
@@ -0,0 +1,245 @@
#include <Databases/DatabaseFilesystem.h>

#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/parseQuery.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Common/filesystemHelpers.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int PATH_ACCESS_DENIED;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
}

DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_)
    : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")"))
{
    bool is_local = context_->getApplicationType() == Context::ApplicationType::LOCAL;
    fs::path user_files_path = is_local ? "" : fs::canonical(getContext()->getUserFilesPath());

    if (fs::path(path).is_relative())
    {
        path = user_files_path / path;
    }
    else if (!is_local && !pathStartsWith(fs::path(path), user_files_path))
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS,
                        "Path must be inside user-files path: {}", user_files_path.string());
    }

    path = fs::absolute(path).lexically_normal();
    if (!fs::exists(path))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path does not exist: {}", path);
}

std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const
{
    fs::path table_path = fs::path(path) / table_name;
    return table_path.lexically_normal().string();
}

void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const
{
    /// If run in Local mode, no need for path checking.
    bool check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL;
    const auto & user_files_path = context_->getUserFilesPath();

    /// Check access for file before checking its existence.
    if (check_path && !fileOrSymlinkPathStartsWith(table_path, user_files_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "File is not inside {}", user_files_path);
        else
            return false;
    }

    /// Check if the corresponding file exists.
    if (!fs::exists(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist: {}", table_path);
        else
            return false;
    }

    if (!fs::is_regular_file(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST,
                            "File is directory, but expected a file: {}", table_path);
        else
            return false;
    }

    return true;
}

StoragePtr DatabaseFilesystem::tryGetTableFromCache(const std::string & name) const
{
    StoragePtr table = nullptr;
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            table = it->second;
    }

    /// Invalidate cache if file no longer exists.
    if (table && !fs::exists(getTablePath(name)))
    {
        std::lock_guard lock(mutex);
        loaded_tables.erase(name);
        return nullptr;
    }

    return table;
}

bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const
{
    if (tryGetTableFromCache(name))
        return true;

    return checkTableFilePath(getTablePath(name), context_, /* throw_on_error */false);
}

StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if table exists in loaded tables map.
    if (auto table = tryGetTableFromCache(name))
        return table;

    auto table_path = getTablePath(name);
    checkTableFilePath(table_path, context_, /* throw_on_error */true);

    /// If the file exists, create a new table using TableFunctionFile and return it.
    auto args = makeASTFunction("file", std::make_shared<ASTLiteral>(table_path));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionFile throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const
{
    /// getTableImpl can throw exceptions, do not catch them to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionFile, which indicate that there is no table;
        /// see tests/02722_database_filesystem.sh for more details.
        if (e.code() == ErrorCodes::FILE_DOESNT_EXIST)
        {
            return nullptr;
        }
        throw;
    }
}

bool DatabaseFilesystem::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path);

    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseFilesystem::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables
 * But only caches them for quick access
 */
DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

}
src/Databases/DatabaseFilesystem.h (new file, 67 lines)
@@ -0,0 +1,67 @@
#pragma once

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseFilesystem allows to interact with files stored on the local filesystem.
 * Uses TableFunctionFile to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 *
 * Used in clickhouse-local to access local files.
 * For clickhouse-server, access is allowed only to files inside the user_files directory.
 */
class DatabaseFilesystem : public IDatabase, protected WithContext
{
public:
    DatabaseFilesystem(const String & name, const String & path, ContextPtr context);

    String getEngineName() const override { return "Filesystem"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    StoragePtr tryGetTableFromCache(const std::string & name) const;

    std::string getTablePath(const std::string & table_name) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const;

private:
    String path;
    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}
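As the header comment says, a "table" here is just a file resolved on demand. A hypothetical interaction showing the lazy load and the cache invalidation implemented by tryGetTableFromCache (path and file name illustrative):

```sql
CREATE DATABASE fs ENGINE = Filesystem('/path/to/user_files');

-- First access wraps the file via TableFunctionFile and caches the storage:
SELECT count() FROM fs.`data.csv`;

-- If data.csv is deleted afterwards, the cached entry is evicted and the
-- next access fails with a FILE_DOESNT_EXIST / UNKNOWN_TABLE error.
SELECT count() FROM fs.`data.csv`;
```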
src/Databases/DatabaseHDFS.cpp (new file, 234 lines)
@@ -0,0 +1,234 @@
#include "config.h"

#if USE_HDFS

#include <Databases/DatabaseHDFS.h>

#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/HDFS/HDFSCommon.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <Poco/URI.h>
#include <re2/re2.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int ACCESS_DENIED;
    extern const int DATABASE_ACCESS_DENIED;
    extern const int HDFS_ERROR;
    extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
}

static constexpr std::string_view HDFS_HOST_REGEXP = "^hdfs://[^/]*";


DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , source(source_url)
    , log(&Poco::Logger::get("DatabaseHDFS(" + name_ + ")"))
{
    if (!source.empty())
    {
        if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP)))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. "
                            "It should have structure 'hdfs://<host_name>:<port>'", source);

        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source));
    }
}

void DatabaseHDFS::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseHDFS::getTablePath(const std::string & table_name) const
{
    if (table_name.starts_with("hdfs://"))
        return table_name;

    if (source.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. "
                        "It should have structure 'hdfs://<host_name>:<port>/path'", table_name);

    return fs::path(source) / table_name;
}

bool DatabaseHDFS::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        checkHDFSURL(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(url));
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }

    return true;
}

bool DatabaseHDFS::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(name, context_, false);
}

StoragePtr DatabaseHDFS::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getTablePath(name);

    checkUrl(url, context_, true);

    auto args = makeASTFunction("hdfs", std::make_shared<ASTLiteral>(url));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionHDFS throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseHDFS::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionHDFS to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionHDFS, which indicate that there is no table.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::ACCESS_DENIED
            || e.code() == ErrorCodes::DATABASE_ACCESS_DENIED
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL
            || e.code() == ErrorCodes::HDFS_ERROR
            || e.code() == ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseHDFS::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    const String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseHDFS::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseHDFS::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables
 * But only caches them for quick access
 */
DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

} // DB

#endif
src/Databases/DatabaseHDFS.h (new file, 68 lines)
@@ -0,0 +1,68 @@
#pragma once

#include "config.h"

#if USE_HDFS

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseHDFS allows to interact with files stored on HDFS.
 * Uses TableFunctionHDFS to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseHDFS : public IDatabase, protected WithContext
{
public:
    DatabaseHDFS(const String & name, const String & source_url, ContextPtr context);

    String getEngineName() const override { return "HDFS"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getTablePath(const std::string & table_name) const;

private:
    const String source;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
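Usage mirrors the Filesystem engine; per getTablePath, a table name is appended to the configured source URL unless it is already a full `hdfs://` URL (endpoint and path illustrative):

```sql
CREATE DATABASE hdfs_db ENGINE = HDFS('hdfs://namenode:9000');

-- Resolved to hdfs://namenode:9000/path/to/file.parquet and read
-- through the hdfs() table function:
SELECT * FROM hdfs_db.`path/to/file.parquet` LIMIT 10;
```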
src/Databases/DatabaseS3.cpp (new file, 312 lines)
@@ -0,0 +1,312 @@
#include "config.h"

#if USE_AWS_S3

#include <Databases/DatabaseS3.h>

#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <IO/S3/URI.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/IStorage.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <boost/algorithm/string.hpp>
#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

static const std::unordered_set<std::string_view> optional_configuration_keys = {
    "url",
    "access_key_id",
    "secret_access_key",
    "no_sign_request"
};

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int S3_ERROR;

    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

DatabaseS3::DatabaseS3(const String & name_, const Configuration & config_, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , config(config_)
    , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")"))
{
}

void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseS3::getFullUrl(const std::string & name) const
{
    if (!config.url_prefix.empty())
        return fs::path(config.url_prefix) / name;

    return name;
}

bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        S3::URI uri(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri.uri);
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }
    return true;
}

bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(getFullUrl(name), context_, false);
}

StoragePtr DatabaseS3::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getFullUrl(name);
    checkUrl(url, context_, /* throw_on_error */true);

    auto function = std::make_shared<ASTFunction>();
    function->name = "s3";
    function->arguments = std::make_shared<ASTExpressionList>();
    function->children.push_back(function->arguments);

    function->arguments->children.push_back(std::make_shared<ASTLiteral>(url));
    if (config.no_sign_request)
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>("NOSIGN"));
    }
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.access_key_id.value()));
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.secret_access_key.value()));
    }

    auto table_function = TableFunctionFactory::instance().get(function, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionS3 throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(function, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseS3::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionS3 to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionS3, which indicate that there is no table.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::S3_ERROR
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseS3::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseS3::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    std::string creation_args;
    creation_args += fmt::format("'{}'", config.url_prefix);
    if (config.no_sign_request)
        creation_args += ", 'NOSIGN'";
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
        creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value());

    const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseS3::shutdown()
{
    Tables tables_snapshot;
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
tables_snapshot = loaded_tables;
|
||||
}
|
||||
|
||||
for (const auto & kv : tables_snapshot)
|
||||
{
|
||||
auto table_id = kv.second->getStorageID();
|
||||
kv.second->flushAndShutdown();
|
||||
}
|
||||
|
||||
std::lock_guard lock(mutex);
|
||||
loaded_tables.clear();
|
||||
}
|
||||
|
||||
DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPtr context_)
|
||||
{
|
||||
Configuration result;
|
||||
|
||||
if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context_))
|
||||
{
|
||||
auto & collection = *named_collection;
|
||||
|
||||
validateNamedCollection(collection, {}, optional_configuration_keys);
|
||||
|
||||
result.url_prefix = collection.getOrDefault<String>("url", "");
|
||||
result.no_sign_request = collection.getOrDefault<bool>("no_sign_request", false);
|
||||
|
||||
auto key_id = collection.getOrDefault<String>("access_key_id", "");
|
||||
auto secret_key = collection.getOrDefault<String>("secret_access_key", "");
|
||||
|
||||
if (!key_id.empty())
|
||||
result.access_key_id = key_id;
|
||||
|
||||
if (!secret_key.empty())
|
||||
result.secret_access_key = secret_key;
|
||||
}
|
||||
else
|
||||
{
|
||||
const std::string supported_signature =
|
||||
" - S3()\n"
|
||||
" - S3('url')\n"
|
||||
" - S3('url', 'NOSIGN')\n"
|
||||
" - S3('url', 'access_key_id', 'secret_access_key')\n";
|
||||
const auto error_message =
|
||||
fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature);
|
||||
|
||||
for (auto & arg : engine_args)
|
||||
arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_);
|
||||
|
||||
if (engine_args.size() > 3)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str());
|
||||
|
||||
if (engine_args.empty())
|
||||
return result;
|
||||
|
||||
result.url_prefix = checkAndGetLiteralArgument<String>(engine_args[0], "url");
|
||||
|
||||
// url, NOSIGN
|
||||
if (engine_args.size() == 2)
|
||||
{
|
||||
auto second_arg = checkAndGetLiteralArgument<String>(engine_args[1], "NOSIGN");
|
||||
if (boost::iequals(second_arg, "NOSIGN"))
|
||||
result.no_sign_request = true;
|
||||
else
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
|
||||
}
|
||||
|
||||
// url, access_key_id, secret_access_key
|
||||
if (engine_args.size() == 3)
|
||||
{
|
||||
auto key_id = checkAndGetLiteralArgument<String>(engine_args[1], "access_key_id");
|
||||
auto secret_key = checkAndGetLiteralArgument<String>(engine_args[2], "secret_access_key");
|
||||
|
||||
if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN"))
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
|
||||
|
||||
result.access_key_id = key_id;
|
||||
result.secret_access_key = secret_key;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an empty vector because the database is read-only and no tables can be backed up
|
||||
*/
|
||||
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseS3::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* Returns an empty iterator because the database does not have its own tables
|
||||
* But only caches them for quick access
|
||||
*/
|
||||
DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
|
||||
{
|
||||
return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
81 src/Databases/DatabaseS3.h Normal file
@ -0,0 +1,81 @@
#pragma once

#include "config.h"

#if USE_AWS_S3

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseS3 provides access to data stored in S3.
 * It uses TableFunctionS3 to implicitly load the file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseS3 : public IDatabase, protected WithContext
{
public:
    struct Configuration
    {
        std::string url_prefix;

        bool no_sign_request = false;

        std::optional<std::string> access_key_id;
        std::optional<std::string> secret_access_key;
    };

    DatabaseS3(const String & name, const Configuration & config, ContextPtr context);

    String getEngineName() const override { return "S3"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    // Contains only temporary tables.
    bool shouldBeEmptyOnDetach() const override { return false; }

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

    static Configuration parseArguments(ASTs engine_args, ContextPtr context);

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getFullUrl(const std::string & name) const;

private:
    const Configuration config;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
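Taken together, the two files above add a read-only, SQL-facing database over S3. A minimal usage sketch follows; the endpoint, credentials, and object name are hypothetical, chosen only to match the argument signatures accepted by `DatabaseS3::parseArguments`:

```sql
-- 'url', 'access_key_id', 'secret_access_key' form; S3(), S3('url'),
-- and S3('url', 'NOSIGN') also parse.
CREATE DATABASE s3_db ENGINE = S3('http://localhost:11111/test', 'test', 'testtest');

-- The "table" name is appended to url_prefix by getFullUrl(), resolved
-- lazily through TableFunctionS3, and cached in loaded_tables.
SELECT * FROM s3_db.`some_file.tsv` LIMIT 10;
```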
266 src/Databases/DatabasesOverlay.cpp Normal file
@ -0,0 +1,266 @@
#include <Databases/DatabasesOverlay.h>

#include <Common/typeid_cast.h>
#include <Interpreters/Context.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Parsers/ASTCreateQuery.h>

#include <Storages/IStorage_fwd.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int CANNOT_GET_CREATE_TABLE_QUERY;
}

DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_)
    : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get("DatabaseOverlay(" + name_ + ")"))
{
}

DatabasesOverlay & DatabasesOverlay::registerNextDatabase(DatabasePtr database)
{
    databases.push_back(std::move(database));
    return *this;
}

bool DatabasesOverlay::isTableExist(const String & table_name, ContextPtr context_) const
{
    for (const auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
            return true;
    }
    return false;
}

StoragePtr DatabasesOverlay::tryGetTable(const String & table_name, ContextPtr context_) const
{
    StoragePtr result = nullptr;
    for (const auto & db : databases)
    {
        result = db->tryGetTable(table_name, context_);
        if (result)
            break;
    }
    return result;
}

void DatabasesOverlay::createTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const ASTPtr & query)
{
    for (auto & db : databases)
    {
        if (!db->isReadOnly())
        {
            db->createTable(context_, table_name, table, query);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for CREATE TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync)
{
    for (auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
        {
            db->dropTable(context_, table_name, sync);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for DROP TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

void DatabasesOverlay::attachTable(
    ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path)
{
    for (auto & db : databases)
    {
        try
        {
            db->attachTable(context_, table_name, table, relative_table_path);
            return;
        }
        catch (...)
        {
            continue;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for ATTACH TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name)
{
    StoragePtr result = nullptr;
    for (auto & db : databases)
    {
        if (db->isTableExist(table_name, context_))
            return db->detachTable(context_, table_name);
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for DETACH TABLE `{}` query in database `{}` (engine {})",
        table_name,
        getDatabaseName(),
        getEngineName());
}

ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const
{
    ASTPtr result = nullptr;
    for (const auto & db : databases)
    {
        result = db->tryGetCreateTableQuery(name, context_);
        if (result)
            break;
    }
    if (!result && throw_on_error)
        throw Exception(
            ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY,
            "There is no metadata of table `{}` in database `{}` (engine {})",
            name,
            getDatabaseName(),
            getEngineName());
    return result;
}

/*
 * DatabaseOverlay cannot be constructed by a "CREATE DATABASE" query, as it is not a traditional ClickHouse database.
 * To use DatabaseOverlay, it must be constructed programmatically in code.
 */
ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const
{
    return std::make_shared<ASTCreateQuery>();
}

String DatabasesOverlay::getTableDataPath(const String & table_name) const
{
    String result;
    for (const auto & db : databases)
    {
        result = db->getTableDataPath(table_name);
        if (!result.empty())
            break;
    }
    return result;
}

String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const
{
    String result;
    for (const auto & db : databases)
    {
        result = db->getTableDataPath(query);
        if (!result.empty())
            break;
    }
    return result;
}

UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const
{
    UUID result = UUIDHelpers::Nil;
    for (const auto & db : databases)
    {
        result = db->tryGetTableUUID(table_name);
        if (result != UUIDHelpers::Nil)
            break;
    }
    return result;
}

void DatabasesOverlay::drop(ContextPtr context_)
{
    for (auto & db : databases)
        db->drop(context_);
}

void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
{
    for (auto & db : databases)
    {
        if (!db->isReadOnly() && db->isTableExist(table_id.table_name, local_context))
        {
            db->alterTable(local_context, table_id, metadata);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for ALTER TABLE `{}` query in database `{}` (engine {})",
        table_id.table_name,
        getDatabaseName(),
        getEngineName());
}

std::vector<std::pair<ASTPtr, StoragePtr>>
DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const
{
    std::vector<std::pair<ASTPtr, StoragePtr>> result;
    for (const auto & db : databases)
    {
        auto db_backup = db->getTablesForBackup(filter, local_context);
        result.insert(result.end(), std::make_move_iterator(db_backup.begin()), std::make_move_iterator(db_backup.end()));
    }
    return result;
}

void DatabasesOverlay::createTableRestoredFromBackup(
    const ASTPtr & create_table_query,
    ContextMutablePtr local_context,
    std::shared_ptr<IRestoreCoordination> /*restore_coordination*/,
    UInt64 /*timeout_ms*/)
{
    /// Creates the table by executing a "CREATE TABLE" query.
    InterpreterCreateQuery interpreter{create_table_query, local_context};
    interpreter.setInternal(true);
    interpreter.execute();
}

bool DatabasesOverlay::empty() const
{
    for (const auto & db : databases)
    {
        if (!db->empty())
            return false;
    }
    return true;
}

void DatabasesOverlay::shutdown()
{
    for (auto & db : databases)
        db->shutdown();
}

DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context_, const FilterByNameFunction & filter_by_table_name) const
{
    Tables tables;
    for (const auto & db : databases)
    {
        for (auto table_it = db->getTablesIterator(context_, filter_by_table_name); table_it->isValid(); table_it->next())
            tables.insert({table_it->name(), table_it->table()});
    }
    return std::make_unique<DatabaseTablesSnapshotIterator>(std::move(tables), getDatabaseName());
}

}
66 src/Databases/DatabasesOverlay.h Normal file
@ -0,0 +1,66 @@
#pragma once

#include <Storages/IStorage_fwd.h>
#include <Databases/IDatabase.h>

namespace DB
{

/**
 * Implements the IDatabase interface and combines multiple other databases.
 * Searches for tables in each database in order until found, and delegates operations to the appropriate database.
 * Useful for combining databases.
 *
 * Used in clickhouse-local to combine DatabaseFileSystem and DatabaseMemory.
 */
class DatabasesOverlay : public IDatabase, protected WithContext
{
public:
    DatabasesOverlay(const String & name_, ContextPtr context_);

    /// Not thread-safe. Use only as a factory to initialize the database.
    DatabasesOverlay & registerNextDatabase(DatabasePtr database);

    String getEngineName() const override { return "Overlay"; }

public:
    bool isTableExist(const String & table_name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & table_name, ContextPtr context) const override;

    void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override;

    void dropTable(ContextPtr context, const String & table_name, bool sync) override;

    void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;

    StoragePtr detachTable(ContextPtr context, const String & table_name) override;

    ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override;
    ASTPtr getCreateDatabaseQuery() const override;

    String getTableDataPath(const String & table_name) const override;
    String getTableDataPath(const ASTCreateQuery & query) const override;

    UUID tryGetTableUUID(const String & table_name) const override;

    void drop(ContextPtr context) override;

    void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override;

    void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr<IRestoreCoordination> restore_coordination, UInt64 timeout_ms) override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;

    bool empty() const override;

    void shutdown() override;

protected:
    std::vector<DatabasePtr> databases;
    Poco::Logger * log;
};

}
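Because the overlay is only constructed programmatically, its observable behavior is easiest to sketch from a clickhouse-local session, where it stacks a read-only filesystem database under a writable in-memory one (the file name below is hypothetical):

```sql
-- Resolved by the read-only filesystem layer of the overlay.
SELECT count() FROM `data.csv`;

-- CREATE falls through to the first non-read-only layer (Memory).
CREATE TABLE t (x UInt32) ENGINE = Memory;
INSERT INTO t VALUES (1), (2);
SELECT sum(x) FROM t;
```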
@ -170,7 +170,7 @@ public:
    /// Get the table for work. Return nullptr if there is no table.
    virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;

    StoragePtr getTable(const String & name, ContextPtr context) const;
    virtual StoragePtr getTable(const String & name, ContextPtr context) const;

    virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; }

@ -183,6 +183,8 @@ public:
    /// Is the database empty.
    virtual bool empty() const = 0;

    virtual bool isReadOnly() const { return false; }

    /// Add the table to the database. Record its presence in the metadata.
    virtual void createTable(
        ContextPtr /*context*/,
@ -217,7 +217,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
        std::optional<Configuration> configuration;

        std::string settings_config_prefix = config_prefix + ".clickhouse";
        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix) : nullptr;
        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr;

        if (named_collection)
        {
@ -71,7 +71,7 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)
        MySQLSettings mysql_settings;

        std::optional<MySQLDictionarySource::Configuration> dictionary_configuration;
        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix) : nullptr;
        auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr;
        if (named_collection)
        {
            auto allowed_arguments{dictionary_allowed_keys};
@ -30,8 +30,6 @@
#include <Dictionaries/RegExpTreeDictionary.h>
#include <Dictionaries/YAMLRegExpTreeDictionarySource.h>

#include <re2_st/stringpiece.h>

#include "config.h"

#if USE_VECTORSCAN
@ -469,10 +467,9 @@ public:

    std::pair<String, bool> processBackRefs(const String & data, const re2_st::RE2 & searcher, const std::vector<StringPiece> & pieces)
    {
        re2_st::StringPiece haystack(data.data(), data.size());
        re2_st::StringPiece matches[10];
        std::string_view matches[10];
        String result;
        searcher.Match(haystack, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
        searcher.Match({data.data(), data.size()}, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
        /// if the pattern is a single '$1' but fails to match, we would use the default value.
        if (pieces.size() == 1 && pieces[0].ref_num >= 0 && pieces[0].ref_num < 10 && matches[pieces[0].ref_num].empty())
            return std::make_pair(result, true);
@ -99,8 +99,8 @@ struct ReplaceRegexpImpl
        int num_captures,
        const Instructions & instructions)
    {
        re2_st::StringPiece haystack(haystack_data, haystack_length);
        re2_st::StringPiece matches[max_captures];
        std::string_view haystack(haystack_data, haystack_length);
        std::string_view matches[max_captures];

        size_t copy_pos = 0;
        size_t match_pos = 0;
@ -45,8 +45,8 @@ bool isLargerThanFifty(std::string_view str)
/// Check if sub-patterns of the form x{n} or x{n,} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
{
    re2_st::StringPiece haystack(regexp.data(), regexp.size());
    re2_st::StringPiece matches[2];
    std::string_view haystack(regexp.data(), regexp.size());
    std::string_view matches[2];
    size_t start_pos = 0;
    while (start_pos < haystack.size())
    {
@ -67,8 +67,8 @@ bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
/// Check if sub-patterns of the form x{n,m} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowTwoRepeats(std::string_view regexp)
{
    re2_st::StringPiece haystack(regexp.data(), regexp.size());
    re2_st::StringPiece matches[3];
    std::string_view haystack(regexp.data(), regexp.size());
    std::string_view matches[3];
    size_t start_pos = 0;
    while (start_pos < haystack.size())
    {
@ -94,7 +94,6 @@ public:
        if (needle.empty())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Length of 'needle' argument must be greater than 0.");

        using StringPiece = typename Regexps::Regexp::StringPieceType;
        const Regexps::Regexp holder = Regexps::createRegexp<false, false, false>(needle);
        const auto & regexp = holder.getRE2();

@ -111,7 +110,7 @@ public:
                groups_count, std::to_string(MAX_GROUPS_COUNT - 1));

        // Including 0-group, which is the whole regexp.
        PODArrayWithStackMemory<StringPiece, MAX_GROUPS_COUNT> matched_groups(groups_count + 1);
        PODArrayWithStackMemory<std::string_view, MAX_GROUPS_COUNT> matched_groups(groups_count + 1);

        ColumnArray::ColumnOffsets::MutablePtr root_offsets_col = ColumnArray::ColumnOffsets::create();
        ColumnArray::ColumnOffsets::MutablePtr nested_offsets_col = ColumnArray::ColumnOffsets::create();
@ -160,7 +159,7 @@ public:
        /// Additional limit to fail fast on supposedly incorrect usage.
        const auto max_matches_per_row = context->getSettingsRef().regexp_max_matches_per_row;

        PODArray<StringPiece, 0> all_matches;
        PODArray<std::string_view, 0> all_matches;
        /// Number of times RE matched on each row of haystack column.
        PODArray<size_t, 0> number_of_matches_per_row;

@ -75,7 +75,7 @@ public:
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "There are no groups in regexp: {}", needle);

        // Including 0-group, which is the whole regexp.
        PODArrayWithStackMemory<re2_st::StringPiece, 128> matched_groups(groups_count + 1);
        PODArrayWithStackMemory<std::string_view, 128> matched_groups(groups_count + 1);

        ColumnArray::ColumnOffsets::MutablePtr offsets_col = ColumnArray::ColumnOffsets::create();
        ColumnString::MutablePtr data_col = ColumnString::create();
@ -89,7 +89,7 @@ public:
        {
            std::string_view current_row = column_haystack->getDataAt(i).toView();

            if (re2->Match(re2_st::StringPiece(current_row.data(), current_row.size()),
            if (re2->Match({current_row.data(), current_row.size()},
                           0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(),
                           static_cast<int>(matched_groups.size())))
            {
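The `StringPiece`-to-`std::string_view` migration above is behavior-preserving; a quick sanity check of the affected group-extraction path would be:

```sql
-- extractGroups returns the captured groups as an array.
SELECT extractGroups('Tom Sawyer', '(\\w+) (\\w+)') AS groups;
-- Expected: ['Tom','Sawyer']
```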
@ -357,6 +357,7 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
        auto table = database->tryGetTable(table_id.table_name, context_);
        if (!table && exception)
            exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs()));

        if (!table)
            database = nullptr;

@ -11,16 +11,7 @@

namespace DB
{
///
/// -------- Column --------- Type ------
/// | event_date       | DateTime |
/// | event_time       | UInt64   |
/// | query_id         | String   |
/// | remote_file_path | String   |
/// | segment_range    | Tuple    |
/// | read_type        | String   |
/// -------------------------------------
///

struct FilesystemCacheLogElement
{
    enum class CacheType
@ -881,46 +881,24 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat
    }
}

String InterpreterCreateQuery::getTableEngineName(DefaultTableEngine default_table_engine)
namespace
{
    switch (default_table_engine)
    void checkTemporaryTableEngineName(const String & name)
    {
        case DefaultTableEngine::Log:
            return "Log";

        case DefaultTableEngine::StripeLog:
            return "StripeLog";

        case DefaultTableEngine::MergeTree:
            return "MergeTree";

        case DefaultTableEngine::ReplacingMergeTree:
            return "ReplacingMergeTree";

        case DefaultTableEngine::ReplicatedMergeTree:
            return "ReplicatedMergeTree";

        case DefaultTableEngine::ReplicatedReplacingMergeTree:
            return "ReplicatedReplacingMergeTree";

        case DefaultTableEngine::Memory:
            return "Memory";

        default:
            throw Exception(ErrorCodes::LOGICAL_ERROR, "default_table_engine is set to unknown value");
        if (name.starts_with("Replicated") || name == "KeeperMap")
            throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated or KeeperMap table engines");
    }
}

void InterpreterCreateQuery::setDefaultTableEngine(ASTStorage & storage, ContextPtr local_context)
{
    if (local_context->getSettingsRef().default_table_engine.value == DefaultTableEngine::None)
    void setDefaultTableEngine(ASTStorage & storage, DefaultTableEngine engine)
    {
        if (engine == DefaultTableEngine::None)
            throw Exception(ErrorCodes::ENGINE_REQUIRED, "Table engine is not specified in CREATE query");

        auto engine_ast = std::make_shared<ASTFunction>();
        auto default_table_engine = local_context->getSettingsRef().default_table_engine.value;
        engine_ast->name = getTableEngineName(default_table_engine);
        engine_ast->name = SettingFieldDefaultTableEngine(engine).toString();
        engine_ast->no_empty_args = true;
        storage.set(storage.engine, engine_ast);
    }
}

void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
@ -936,32 +914,23 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const

    if (create.temporary)
    {
        /// It's possible if some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not.
        /// It makes sense when default_table_engine setting is used, but not for temporary tables.
        /// For temporary tables we ignore this setting to allow CREATE TEMPORARY TABLE query without specifying ENGINE
        /// Some part of storage definition is specified, but ENGINE is not: just set the one from the default_temporary_table_engine setting.

        if (!create.cluster.empty())
            throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with ON CLUSTER clause");

        if (create.storage)
        if (!create.storage)
        {
            if (create.storage->engine)
            {
                if (create.storage->engine->name.starts_with("Replicated") || create.storage->engine->name == "KeeperMap")
                    throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated or KeeperMap table engines");
            }
            else
                throw Exception(ErrorCodes::INCORRECT_QUERY, "Invalid storage definition for temporary table");
        }
        else
        {
            auto engine_ast = std::make_shared<ASTFunction>();
            engine_ast->name = "Memory";
            engine_ast->no_empty_args = true;
            auto storage_ast = std::make_shared<ASTStorage>();
            storage_ast->set(storage_ast->engine, engine_ast);
            create.set(create.storage, storage_ast);
        }

        if (!create.storage->engine)
        {
            setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_temporary_table_engine.value);
        }

        checkTemporaryTableEngineName(create.storage->engine->name);
        return;
    }

@ -969,7 +938,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
    {
        /// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one.
        if (!create.storage->engine)
            setDefaultTableEngine(*create.storage, getContext());
            setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
        return;
    }

@ -1008,7 +977,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
    }

    create.set(create.storage, std::make_shared<ASTStorage>());
    setDefaultTableEngine(*create.storage, getContext());
    setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
}

static void generateUUIDForTable(ASTCreateQuery & create)
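With the check factored into `checkTemporaryTableEngineName`, the rejection now applies whether the engine was written explicitly or filled in from `default_temporary_table_engine`. For example (ZooKeeper path and replica name hypothetical):

```sql
-- Fails with INCORRECT_QUERY:
-- "Temporary tables cannot be created with Replicated or KeeperMap table engines"
CREATE TEMPORARY TABLE tmp (x UInt32)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/tmp', 'r1')
ORDER BY x;
```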
@ -90,8 +90,6 @@ private:
    /// Calculate list of columns, constraints, indices, etc... of table. Rewrite query in canonical way.
    TableProperties getTablePropertiesAndNormalizeCreateQuery(ASTCreateQuery & create) const;
    void validateTableStructure(const ASTCreateQuery & create, const TableProperties & properties) const;
    static String getTableEngineName(DefaultTableEngine default_table_engine);
    static void setDefaultTableEngine(ASTStorage & storage, ContextPtr local_context);
    void setEngine(ASTCreateQuery & create) const;
    AccessRightsElements getRequiredAccess() const;

@ -370,15 +370,15 @@ BlockIO InterpreterSystemQuery::execute()
            else
            {
                auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache;
                if (query.delete_key.empty())
                if (query.key_to_drop.empty())
                {
                    cache->removeAllReleasable();
                }
                else
                {
                    auto key = FileCacheKey::fromKeyString(query.delete_key);
                    if (query.delete_offset.has_value())
                        cache->removeFileSegment(key, query.delete_offset.value());
                    auto key = FileCacheKey::fromKeyString(query.key_to_drop);
                    if (query.offset_to_drop.has_value())
                        cache->removeFileSegment(key, query.offset_to_drop.value());
                    else
                        cache->removeKey(key);
                }

@ -212,11 +212,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
        if (!filesystem_cache_name.empty())
        {
            settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name;
            if (!delete_key.empty())
            if (!key_to_drop.empty())
            {
                settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key;
                if (delete_offset.has_value())
                    settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << delete_offset.value();
                settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << key_to_drop;
                if (offset_to_drop.has_value())
                    settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << offset_to_drop.value();
            }
        }
    }

@ -107,8 +107,8 @@ public:
    UInt64 seconds{};

    String filesystem_cache_name;
    std::string delete_key;
    std::optional<size_t> delete_offset;
    std::string key_to_drop;
    std::optional<size_t> offset_to_drop;

    String backup_name;

@ -409,9 +409,9 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
            res->filesystem_cache_name = ast->as<ASTLiteral>()->value.safeGet<String>();
            if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected))
            {
                res->delete_key = ast->as<ASTIdentifier>()->name();
                res->key_to_drop = ast->as<ASTIdentifier>()->name();
                if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected))
                    res->delete_offset = ast->as<ASTLiteral>()->value.safeGet<UInt64>();
                    res->offset_to_drop = ast->as<ASTLiteral>()->value.safeGet<UInt64>();
            }
        }
        if (!parseQueryWithOnCluster(res, pos, expected))
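The renamed fields map one-to-one onto the query forms the parser accepts: the cache name is a string literal, the key an identifier, and the offset a numeric literal. All values below are hypothetical:

```sql
SYSTEM DROP FILESYSTEM CACHE 'cache_name' KEY deadbeef;           -- drop the whole key
SYSTEM DROP FILESYSTEM CACHE 'cache_name' KEY deadbeef OFFSET 0;  -- drop a single file segment
```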
@ -1,7 +1,6 @@
#pragma once

#include <re2_st/re2.h>
#include <re2_st/stringpiece.h>
#include <string>
#include <vector>
#include <Core/Block.h>
@ -28,14 +27,14 @@ public:
    /// Return true if row was successfully parsed and row fields were extracted.
    bool parseRow(PeekableReadBuffer & buf);

    re2_st::StringPiece getField(size_t index) { return matched_fields[index]; }
    std::string_view getField(size_t index) { return matched_fields[index]; }
    size_t getMatchedFieldsSize() const { return matched_fields.size(); }
    size_t getNumberOfGroups() const { return regexp.NumberOfCapturingGroups(); }

private:
    const re2_st::RE2 regexp;
    // The vector of fields extracted from line using regexp.
    std::vector<re2_st::StringPiece> matched_fields;
    std::vector<std::string_view> matched_fields;
    // These two vectors are needed to use RE2::FullMatchN (function for extracting fields).
    std::vector<re2_st::RE2::Arg> re2_arguments;
    std::vector<re2_st::RE2::Arg *> re2_arguments_ptrs;

@ -44,6 +44,8 @@
#include <Poco/String.h>
#include <Poco/Net/SocketAddress.h>

#include <re2/re2.h>

#include <chrono>
#include <sstream>

@ -1163,8 +1165,8 @@ void PredefinedQueryHandler::customizeContext(HTTPServerRequest & request, Conte
    {
        int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;

        re2::StringPiece matches[num_captures];
        re2::StringPiece input(begin, end - begin);
        std::string_view matches[num_captures];
        std::string_view input(begin, end - begin);
        if (compiled_regex->Match(input, 0, end - begin, re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures))
        {
            for (const auto & [capturing_name, capturing_index] : compiled_regex->NamedCapturingGroups())

@ -6,7 +6,6 @@
#include <base/find_symbols.h>

#include <re2/re2.h>
#include <re2/stringpiece.h>
#include <Poco/StringTokenizer.h>
#include <Poco/Util/LayeredConfiguration.h>

@ -26,9 +25,8 @@ static inline bool checkRegexExpression(std::string_view match_str, const Compil
{
    int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;

    re2::StringPiece matches[num_captures];
    re2::StringPiece match_input(match_str.data(), match_str.size());
    return compiled_regex->Match(match_input, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
    std::string_view matches[num_captures];
    return compiled_regex->Match({match_str.data(), match_str.size()}, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
}

static inline bool checkExpression(std::string_view match_str, const std::pair<String, CompiledRegexPtr> & expression)

@ -7196,7 +7196,10 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
    if (query_context->canUseParallelReplicasOnInitiator() && to_stage >= QueryProcessingStage::WithMergeableState)
    {
        if (!canUseParallelReplicasBasedOnPKAnalysis(query_context, storage_snapshot, query_info))
        {
            query_info.parallel_replicas_disabled = true;
            return QueryProcessingStage::Enum::FetchColumns;
        }

        /// ReplicatedMergeTree
        if (supportsReplication())
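Nothing changes syntactically on the query side; when PK analysis rules parallel replicas out, the initiator records `parallel_replicas_disabled` and falls back to `FetchColumns`. A hedged illustration of settings that would exercise this path (table and column names hypothetical):

```sql
SET allow_experimental_parallel_reading_from_replicas = 1;
SET parallel_replicas_for_non_replicated_merge_tree = 1;

-- If the PK range is too small to be worth splitting across replicas,
-- this is now silently executed as a plain local read.
SELECT count() FROM events WHERE key BETWEEN 1 AND 10;
```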
@ -1,4 +1,5 @@
#include "NamedCollectionsHelpers.h"
#include <Access/ContextAccess.h>
#include <Common/NamedCollections/NamedCollections.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Storages/checkAndGetLiteralArgument.h>
@ -15,19 +16,16 @@ namespace ErrorCodes

namespace
{
    NamedCollectionPtr tryGetNamedCollectionFromASTs(ASTs asts, bool throw_unknown_collection)
    std::optional<std::string> getCollectionName(ASTs asts)
    {
        if (asts.empty())
            return nullptr;
            return std::nullopt;

        const auto * identifier = asts[0]->as<ASTIdentifier>();
        if (!identifier)
            return nullptr;
            return std::nullopt;

        const auto & collection_name = identifier->name();
        if (throw_unknown_collection)
            return NamedCollectionFactory::instance().get(collection_name);
        return NamedCollectionFactory::instance().tryGet(collection_name);
        return identifier->name();
    }

    std::optional<std::pair<std::string, std::variant<Field, ASTPtr>>> getKeyValueFromAST(ASTPtr ast, bool fallback_to_ast_value, ContextPtr context)
@ -74,7 +72,18 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(

    NamedCollectionUtils::loadIfNot();

    auto collection = tryGetNamedCollectionFromASTs(asts, throw_unknown_collection);
    auto collection_name = getCollectionName(asts);
    if (!collection_name.has_value())
        return nullptr;

    context->checkAccess(AccessType::NAMED_COLLECTION, *collection_name);

    NamedCollectionPtr collection;
    if (throw_unknown_collection)
        collection = NamedCollectionFactory::instance().get(*collection_name);
    else
        collection = NamedCollectionFactory::instance().tryGet(*collection_name);

    if (!collection)
        return nullptr;

@ -106,12 +115,14 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
}

MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
    const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix)
    const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context)
{
    auto collection_name = config.getString(config_prefix + ".name", "");
    if (collection_name.empty())
        return nullptr;

    context->checkAccess(AccessType::NAMED_COLLECTION, collection_name);

    const auto & collection = NamedCollectionFactory::instance().get(collection_name);
    auto collection_copy = collection->duplicate();
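Both overloads now verify the `NAMED_COLLECTION` access right against the collection name before resolving it. Assuming a collection `mysql_creds` and the dictionary-DDL spelling where the collection is passed via `NAME`, DDL like the following would fail for a user without that right:

```sql
CREATE DICTIONARY dict (id UInt64, value String)
PRIMARY KEY id
SOURCE(MYSQL(NAME mysql_creds TABLE 'dict_source'))  -- requires NAMED_COLLECTION access to mysql_creds
LAYOUT(FLAT())
LIFETIME(60);
```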
@ -22,7 +22,7 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
    ASTs asts, ContextPtr context, bool throw_unknown_collection = true, std::vector<std::pair<std::string, ASTPtr>> * complex_args = nullptr);
/// Helper function to get named collection for dictionary source.
/// Dictionaries pass the collection name as the `name` argument of the dictionary configuration; the other arguments are overrides.
MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix);
MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context);

HTTPHeaderEntries getHeadersFromNamedCollection(const NamedCollection & collection);

@ -255,6 +255,8 @@ struct SelectQueryInfo
    Block minmax_count_projection_block;
    MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;

    bool parallel_replicas_disabled = false;

    bool is_parameterized_view = false;
    NameToNameMap parameterized_view_values;
@ -209,7 +209,9 @@ void StorageMergeTree::read(
    size_t max_block_size,
    size_t num_streams)
{
    if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
    if (!query_info.parallel_replicas_disabled &&
        local_context->canUseParallelReplicasOnInitiator() &&
        local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
    {
        auto table_id = getStorageID();

@ -240,7 +242,10 @@ void StorageMergeTree::read(
    }
    else
    {
        const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
        const bool enable_parallel_reading =
            !query_info.parallel_replicas_disabled &&
            local_context->canUseParallelReplicasOnFollower() &&
            local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;

        if (auto plan = reader.read(
            column_names, storage_snapshot, query_info,
@ -929,14 +934,20 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge(

    SelectPartsDecision select_decision = SelectPartsDecision::CANNOT_SELECT;

    if (!canEnqueueBackgroundTask())
    auto is_background_memory_usage_ok = [](String * disable_reason) -> bool
    {
        if (out_disable_reason)
            *out_disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
        if (canEnqueueBackgroundTask())
            return true;
        if (disable_reason)
            *disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
                formatReadableSizeWithBinarySuffix(background_memory_tracker.get()),
                formatReadableSizeWithBinarySuffix(background_memory_tracker.getSoftLimit()));
    }
    else if (partition_id.empty())
        return false;
    };

    if (partition_id.empty())
    {
        if (is_background_memory_usage_ok(out_disable_reason))
        {
            UInt64 max_source_parts_size = merger_mutator.getMaxSourcePartsSizeForMerge();
            bool merge_with_ttl_allowed = getTotalMergesWithTTLInMergeList() < data_settings->max_number_of_merges_with_ttl_in_pool;
@ -958,15 +969,35 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge(
        else if (out_disable_reason)
            *out_disable_reason = "Current value of max_source_parts_size is zero";
    }
    }
    else
    {
        while (true)
        {
            select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
                future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
            auto timeout_ms = getSettings()->lock_acquire_timeout_for_background_operations.totalMilliseconds();
            auto timeout = std::chrono::milliseconds(timeout_ms);

            if (!is_background_memory_usage_ok(out_disable_reason))
            {
                constexpr auto poll_interval = std::chrono::seconds(1);
                Int64 attempts = timeout / poll_interval;
                bool ok = false;
                for (Int64 i = 0; i < attempts; ++i)
                {
                    std::this_thread::sleep_for(poll_interval);
                    if (is_background_memory_usage_ok(out_disable_reason))
                    {
                        ok = true;
                        break;
                    }
                }
                if (!ok)
                    break;
            }

            select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
                future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);

            /// If final - we will wait for currently processing merges to finish and continue.
            if (final
                && select_decision != SelectPartsDecision::SELECTED
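For a targeted merge this changes the failure mode: instead of returning immediately when background memory usage is over the soft limit, part selection now polls `is_background_memory_usage_ok` once per second, up to `lock_acquire_timeout_for_background_operations`. A manual statement like the following (table and partition hypothetical) may therefore wait rather than fail fast:

```sql
OPTIMIZE TABLE events PARTITION '2023-07' FINAL;
```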
@ -243,7 +243,7 @@ function check_logs_for_critical_errors()
    # Remove file fatal_messages.txt if it's empty
    [ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt

    rg -Fa "########################################" /test_output/* > /dev/null \
    rg -Faz "########################################" /test_output/* > /dev/null \
        && echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv

function get_gdb_log_context()

@ -32,5 +32,10 @@
            <secret_access_key>testtest</secret_access_key>
            <structure>auto</structure>
        </s3_conn>
        <s3_conn_db>
            <url>http://localhost:11111/test/</url>
            <access_key_id>test</access_key_id>
            <secret_access_key>testtest</secret_access_key>
        </s3_conn_db>
    </named_collections>
</clickhouse>
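The new `s3_conn_db` entry feeds the named-collection branch of `DatabaseS3::parseArguments`, so a test can create the database without inline credentials (object name hypothetical):

```sql
CREATE DATABASE db_s3 ENGINE = S3(s3_conn_db);
SELECT * FROM db_s3.`test_file.tsv`;
```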
@ -12,6 +12,7 @@
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>

@ -8,9 +8,14 @@ import logging

DICTS = ["configs/dictionaries/mysql_dict1.xml", "configs/dictionaries/mysql_dict2.xml"]
CONFIG_FILES = ["configs/remote_servers.xml", "configs/named_collections.xml"]
USER_CONFIGS = ["configs/users.xml"]
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
    "instance", main_configs=CONFIG_FILES, with_mysql=True, dictionaries=DICTS
    "instance",
    main_configs=CONFIG_FILES,
    user_configs=USER_CONFIGS,
    with_mysql=True,
    dictionaries=DICTS,
)

create_table_mysql_template = """

@ -0,0 +1,10 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <quota>default</quota>
            <use_named_collections>1</use_named_collections>
        </default>
    </users>
</clickhouse>
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -9,6 +9,7 @@ node = cluster.add_instance(
    main_configs=[
        "configs/named_collections.xml",
    ],
    user_configs=["configs/users.xml"],
    with_zookeeper=True,
)

@ -0,0 +1,10 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <quota>default</quota>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -12,6 +12,7 @@ cluster = ClickHouseCluster(__file__)
clickhouse_node = cluster.add_instance(
    "node1",
    main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"],
    user_configs=["configs/users.xml"],
    with_mysql=True,
    stay_alive=True,
)
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <quota>default</quota>
        </default>
    </users>
</clickhouse>

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -8,7 +8,10 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    "node1", main_configs=["configs/named_collections.xml"], with_postgres=True
    "node1",
    main_configs=["configs/named_collections.xml"],
    user_configs=["configs/users.xml"],
    with_postgres=True,
)

postgres_table_template = """

@ -4,4 +4,11 @@
            <allow_experimental_database_materialized_postgresql>1</allow_experimental_database_materialized_postgresql>
        </default>
    </profiles>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -9,6 +9,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    "node1",
    main_configs=["configs/named_collections.xml"],
    user_configs=["configs/users.xml"],
    with_zookeeper=False,
    with_hdfs=True,
)
9 tests/integration/test_s3_cluster/configs/users.xml Normal file
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -68,6 +68,7 @@ def started_cluster():
        cluster.add_instance(
            "s0_0_0",
            main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
            user_configs=["configs/users.xml"],
            macros={"replica": "node1", "shard": "shard1"},
            with_minio=True,
            with_zookeeper=True,
@ -75,12 +76,14 @@ def started_cluster():
        cluster.add_instance(
            "s0_0_1",
            main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
            user_configs=["configs/users.xml"],
            macros={"replica": "replica2", "shard": "shard1"},
            with_zookeeper=True,
        )
        cluster.add_instance(
            "s0_1_0",
            main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
            user_configs=["configs/users.xml"],
            macros={"replica": "replica1", "shard": "shard2"},
            with_zookeeper=True,
        )
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -11,6 +11,9 @@ node = cluster.add_instance(
    main_configs=[
        "configs/config.d/minio.xml",
    ],
    user_configs=[
        "configs/users.d/users.xml",
    ],
    with_minio=True,
)

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -25,7 +25,7 @@ def cluster():
        cluster.add_instance(
            "node",
            main_configs=["configs/named_collections.xml"],
            user_configs=["configs/disable_profilers.xml"],
            user_configs=["configs/disable_profilers.xml", "configs/users.xml"],
            with_azurite=True,
        )
        cluster.start()

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>

@ -53,6 +53,7 @@ def started_cluster():
        cluster.add_instance(
            "node1",
            main_configs=["configs/config.d/named_collections.xml"],
            user_configs=["configs/users.d/users.xml"],
            with_minio=True,
        )
tests/integration/test_storage_dict/configs/users.xml (new file)
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -10,7 +10,10 @@ def cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance(
-            "node1", main_configs=["configs/conf.xml"], with_nginx=True
+            "node1",
+            main_configs=["configs/conf.xml"],
+            user_configs=["configs/users.xml"],
+            with_nginx=True,
        )
        cluster.start()
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -51,6 +51,7 @@ def started_cluster():
    cluster.add_instance(
        "node1",
        main_configs=["configs/config.d/named_collections.xml"],
+        user_configs=["configs/users.d/users.xml"],
        with_minio=True,
    )

@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -53,6 +53,7 @@ def started_cluster():
    cluster.add_instance(
        "node1",
        main_configs=["configs/config.d/named_collections.xml"],
+        user_configs=["configs/users.d/users.xml"],
        with_minio=True,
    )

@ -6,4 +6,11 @@
            <insert_keeper_max_retries>0</insert_keeper_max_retries>
        </default>
    </profiles>
+    <users>
+        <default>
+            <password></password>
+            <profile>default</profile>
+            <named_collection_control>1</named_collection_control>
+        </default>
+    </users>
</clickhouse>
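This hunk appends a `<users>` section to a config file that previously defined only `<profiles>`, so a single users.d fragment can carry both profile settings and user flags. A hedged smoke test that the flag took effect, assuming `SHOW NAMED COLLECTIONS` is available on the server version under test and `node` is any instance mounting this file:

```python
# Sketch: with named_collection_control=1 the default user can introspect
# named collections; without the flag the server may reject this statement
# for lack of the corresponding grant.
node.query("SHOW NAMED COLLECTIONS")
```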
@ -111,6 +111,23 @@ cat > /usr/local/hadoop/etc/hadoop/hdfs-site.xml << EOF
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:1006</value>
    </property>
+    <!-- If the port is 0 then the server will start on a free port. -->
+    <property>
+        <name>dfs.datanode.ipc.address</name>
+        <value>0.0.0.0:0</value>
+    </property>
+    <property>
+        <name>dfs.namenode.secondary.http-address</name>
+        <value>0.0.0.0:0</value>
+    </property>
+    <property>
+        <name>dfs.namenode.backup.address</name>
+        <value>0.0.0.0:0</value>
+    </property>
+    <property>
+        <name>dfs.namenode.backup.http-address</name>
+        <value>0.0.0.0:0</value>
+    </property>
+    <!--
+    <property>
+        <name>dfs.http.policy</name>
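The comment carried into `hdfs-site.xml` names the trick: binding a listener to port 0 lets the OS pick any free port, which avoids hard-coded port clashes when several test clusters share one CI host. The same mechanism in miniature, using plain Python sockets purely for illustration:

```python
import socket

# Bind to port 0 and let the kernel choose a free port - the same behaviour
# the 0.0.0.0:0 addresses above request from the HDFS daemons.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", 0))
print("assigned port:", sock.getsockname()[1])
sock.close()
```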
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -16,7 +16,10 @@ def started_cluster(request):
    try:
        cluster = ClickHouseCluster(__file__)
        node = cluster.add_instance(
-            "meili", main_configs=["configs/named_collection.xml"], with_meili=True
+            "meili",
+            main_configs=["configs/named_collection.xml"],
+            user_configs=["configs/users.xml"],
+            with_meili=True,
        )
        cluster.start()
        yield cluster
tests/integration/test_storage_mongodb/configs/users.xml (new file)
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -17,6 +17,7 @@ def started_cluster(request):
            "configs_secure/config.d/ssl_conf.xml",
            "configs/named_collections.xml",
        ],
+        user_configs=["configs/users.xml"],
        with_mongo=True,
        with_mongo_secure=request.param,
    )
@ -12,6 +12,7 @@
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
+            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -13,6 +13,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
    "node1",
    main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"],
+    user_configs=["configs/users.xml"],
    with_mysql=True,
)
node2 = cluster.add_instance(
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -7,12 +7,15 @@ from helpers.postgres_utility import get_postgres_conn

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
-    "node1", main_configs=["configs/named_collections.xml"], with_postgres=True
+    "node1",
+    main_configs=["configs/named_collections.xml"],
+    user_configs=["configs/users.xml"],
+    with_postgres=True,
)
node2 = cluster.add_instance(
    "node2",
    main_configs=["configs/named_collections.xml"],
-    user_configs=["configs/settings.xml"],
+    user_configs=["configs/settings.xml", "configs/users.xml"],
    with_postgres_cluster=True,
)

@ -4,4 +4,11 @@
            <stream_like_engine_allow_direct_select>1</stream_like_engine_allow_direct_select>
        </default>
    </profiles>
+    <users>
+        <default>
+            <password></password>
+            <profile>default</profile>
+            <named_collection_control>1</named_collection_control>
+        </default>
+    </users>
</clickhouse>
tests/integration/test_storage_s3/configs/access.xml (new file)
@ -0,0 +1,19 @@
<clickhouse>
    <users>
        <admin>
            <password></password>
            <profile>default</profile>
            <quota>default</quota>
            <grants>
                <query>GRANT admin_role</query>
            </grants>
        </admin>
    </users>
    <roles>
        <admin_role>
            <grants>
                <query>GRANT USE NAMED COLLECTION ON * WITH GRANT OPTION</query>
            </grants>
        </admin_role>
    </roles>
</clickhouse>
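This new `access.xml` bootstraps access control for the S3 tests: the XML-defined `admin` user receives `admin_role` at startup, and the role carries `USE NAMED COLLECTION ON * WITH GRANT OPTION`, so `admin` can delegate per-collection access at runtime. The rewritten test below relies on exactly that; a condensed sketch of the delegation flow, using the collection and user names that appear later in this diff:

```python
# Sketch of the runtime delegation that access.xml makes possible.
instance.query("CREATE USER user")
instance.query("GRANT SELECT ON *.* TO user")
# `admin` holds USE NAMED COLLECTION ON * WITH GRANT OPTION via admin_role,
# so it can pass access to one specific collection on to `user`:
instance.query("GRANT NAMED COLLECTION ON s3_conf1 TO user", user="admin")
```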
tests/integration/test_storage_s3/configs/users.xml (new file)
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
@ -55,6 +55,17 @@ def started_cluster():
            "configs/named_collections.xml",
            "configs/schema_cache.xml",
        ],
+        user_configs=["configs/access.xml", "configs/users.xml"],
    )
+    cluster.add_instance(
+        "dummy_without_named_collections",
+        with_minio=True,
+        main_configs=[
+            "configs/defaultS3.xml",
+            "configs/named_collections.xml",
+            "configs/schema_cache.xml",
+        ],
+        user_configs=["configs/access.xml"],
+    )
    cluster.add_instance(
        "s3_max_redirects",
@ -918,25 +929,61 @@ def test_truncate_table(started_cluster):

def test_predefined_connection_configuration(started_cluster):
    bucket = started_cluster.minio_bucket
-    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
+    instance = started_cluster.instances[
+        "dummy_without_named_collections"
+    ]  # type: ClickHouseInstance
    name = "test_table"

-    instance.query("drop table if exists {}".format(name))
-    instance.query(
-        "CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name)
+    instance.query("CREATE USER user")
+    instance.query("GRANT CREATE ON *.* TO user")
+    instance.query("GRANT SOURCES ON *.* TO user")
+    instance.query("GRANT SELECT ON *.* TO user")
+
+    instance.query(f"drop table if exists {name}", user="user")
+    error = instance.query_and_get_error(
+        f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')"
    )
+    assert (
+        "To execute this query it's necessary to have grant NAMED COLLECTION ON s3_conf1"
+        in error
+    )
+    error = instance.query_and_get_error(
+        f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')",
+        user="user",
+    )
+    assert (
+        "To execute this query it's necessary to have grant NAMED COLLECTION ON s3_conf1"
+        in error
+    )

-    instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
-    result = instance.query("SELECT * FROM {}".format(name))
+    instance.query("GRANT NAMED COLLECTION ON s3_conf1 TO user", user="admin")
+    instance.query(
+        f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')",
+        user="user",
+    )
+
+    instance.query(f"INSERT INTO {name} SELECT number FROM numbers(10)")
+    result = instance.query(f"SELECT * FROM {name}")
    assert result == instance.query("SELECT number FROM numbers(10)")

    result = instance.query(
-        "SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')"
+        "SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')", user="user"
    )
    assert result == instance.query("SELECT number FROM numbers(10)")

-    result = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
-    assert "There is no named collection `no_collection`" in result
+    error = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
+    assert (
+        "To execute this query it's necessary to have grant NAMED COLLECTION ON no_collection"
+        in error
+    )
+    error = instance.query_and_get_error("SELECT * FROM s3(no_collection)", user="user")
+    assert (
+        "To execute this query it's necessary to have grant NAMED COLLECTION ON no_collection"
+        in error
+    )
+    instance = started_cluster.instances["dummy"]  # has named collection access
+    error = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
+    assert "There is no named collection `no_collection`" in error


    result = ""
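The rewritten test distinguishes three outcomes: a user without the grant gets an access error naming the missing `NAMED COLLECTION` grant; a user with the grant can create tables and query through the collection; and only a user who already has collection access sees the "no such collection" error for an unknown name. A small helper in the same style (hypothetical, not part of the diff) captures the assertion pattern the test repeats:

```python
def expect_named_collection_denied(instance, query, collection, user=None):
    # Hypothetical helper, not in the diff: assert that a query fails
    # specifically because the NAMED COLLECTION grant is missing.
    error = instance.query_and_get_error(query, user=user)
    assert (
        f"To execute this query it's necessary to have grant NAMED COLLECTION ON {collection}"
        in error
    )
```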
@ -92,6 +92,7 @@ def started_cluster():
            "configs/use_environment_credentials.xml",
            "configs/named_collections.xml",
        ],
+        user_configs=["configs/users.xml"],
    )

    logging.info("Starting cluster...")
tests/integration/test_storage_url/configs/users.xml (new file)
@ -0,0 +1,9 @@
<clickhouse>
    <users>
        <default>
            <password></password>
            <profile>default</profile>
            <named_collection_control>1</named_collection_control>
        </default>
    </users>
</clickhouse>
Some files were not shown because too many files have changed in this diff.