Merge remote-tracking branch 'blessed/master' into stress_log

Raúl Marín 2024-02-23 10:50:46 +01:00
commit c359f29842
134 changed files with 2110 additions and 634 deletions

View File

@ -375,28 +375,12 @@ jobs:
test_name: Stateless tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseDatabaseReplicated:
FunctionalStatelessTestReleaseAnalyzerS3Replicated:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, DatabaseReplicated)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseAnalyzer:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseS3:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, s3 storage)
test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestS3Debug:
@ -825,9 +809,7 @@ jobs:
- MarkReleaseReady
- FunctionalStatelessTestDebug
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseReplicated
- FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestReleaseS3
- FunctionalStatelessTestReleaseAnalyzerS3Replicated
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan
- FunctionalStatelessTestTsan

View File

@ -391,28 +391,12 @@ jobs:
test_name: Stateless tests (release)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseDatabaseReplicated:
FunctionalStatelessTestReleaseAnalyzerS3Replicated:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, DatabaseReplicated)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseAnalyzer:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestReleaseS3:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stateless tests (release, s3 storage)
test_name: Stateless tests (release, analyzer, s3, DatabaseReplicated)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FunctionalStatelessTestS3Debug:
@ -750,14 +734,6 @@ jobs:
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
@ -774,14 +750,6 @@ jobs:
test_name: Integration tests (tsan)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAarch64:
needs: [RunConfig, BuilderDebAarch64]
if: ${{ !failure() && !cancelled() }}
@ -890,8 +858,6 @@ jobs:
- TestsBugfixCheck
- FunctionalStatelessTestDebug
- FunctionalStatelessTestRelease
- FunctionalStatelessTestReleaseDatabaseReplicated
- FunctionalStatelessTestReleaseAnalyzer
- FunctionalStatelessTestAarch64
- FunctionalStatelessTestAsan
- FunctionalStatelessTestTsan
@ -904,9 +870,9 @@ jobs:
- FunctionalStatefulTestTsan
- FunctionalStatefulTestMsan
- FunctionalStatefulTestUBsan
- FunctionalStatelessTestReleaseS3
- FunctionalStatelessTestS3Debug
- FunctionalStatelessTestS3Tsan
- FunctionalStatelessTestReleaseAnalyzerS3Replicated
- FunctionalStatefulTestReleaseParallelReplicas
- FunctionalStatefulTestAsanParallelReplicas
- FunctionalStatefulTestTsanParallelReplicas
@ -927,10 +893,8 @@ jobs:
- ASTFuzzerTestTsan
- ASTFuzzerTestMSan
- ASTFuzzerTestUBSan
- IntegrationTestsAsan
- IntegrationTestsAnalyzerAsan
- IntegrationTestsTsan
- IntegrationTestsRelease
- IntegrationTestsAarch64
- IntegrationTestsFlakyCheck
- PerformanceComparisonX86

contrib/rapidjson vendored (2 changes)

@ -1 +1 @@
Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa
Subproject commit a9bc56c9165f1dbbbcada64221bd3a59042c5b95

View File

@ -232,8 +232,8 @@ function check_logs_for_critical_errors()
# Remove file logical_errors.txt if it's empty
[ -s /test_output/logical_errors.txt ] || rm /test_output/logical_errors.txt
# No such key errors
rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
# No such key errors (ignore a.myext which is used in 02724_database_s3.sh and does not exist)
rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log | grep -v "a.myext" > /test_output/no_such_key_errors.txt \
&& echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
|| echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv

View File

@ -472,10 +472,10 @@ The value 0 means that you can delete all tables without any restrictions.
``` xml
<max_table_size_to_drop>0</max_table_size_to_drop>
```
## max\_database\_num\_to\_warn {#max-database-num-to-warn}
If the number of attached databases exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
## max\_database\_num\_to\_warn {#max-database-num-to-warn}
If the number of attached databases exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 1000
**Example**
@ -483,10 +483,10 @@ Default value: 1000
``` xml
<max_database_num_to_warn>50</max_database_num_to_warn>
```
## max\_table\_num\_to\_warn {#max-table-num-to-warn}
If the number of attached tables exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 5000
## max\_table\_num\_to\_warn {#max-table-num-to-warn}
If the number of attached tables exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 5000
**Example**
@ -495,9 +495,9 @@ Default value: 5000
```
## max\_part\_num\_to\_warn {#max-part-num-to-warn}
If the number of active parts exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 100000
## max\_part\_num\_to\_warn {#max-part-num-to-warn}
If the number of active parts exceeds the specified value, the ClickHouse server will add warning messages to the `system.warnings` table.
Default value: 100000
**Example**
@ -2873,3 +2873,11 @@ A limit on the number of materialized views attached to a table.
Note that only directly dependent views are considered here; a view created on top of another view does not count toward the limit.
Default value: `0`.
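As an illustrative sketch (the table and view names here are hypothetical), with `max_materialized_views_count_for_table = 1` only the first directly dependent materialized view can be created:

```sql
CREATE TABLE src (x UInt64) ENGINE = MergeTree ORDER BY x;

-- First directly dependent materialized view: allowed
CREATE MATERIALIZED VIEW mv1 ENGINE = MergeTree ORDER BY x AS SELECT x FROM src;

-- Second directly dependent materialized view: rejected once the limit is reached
CREATE MATERIALIZED VIEW mv2 ENGINE = MergeTree ORDER BY x AS SELECT x FROM src;
```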
## format_alter_operations_with_parentheses {#format_alter_operations_with_parentheses}
If set to true, alter operations will be surrounded by parentheses in formatted queries. This makes the parsing of formatted ALTER queries less ambiguous.
Type: Bool
Default: 0
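As a sketch of the effect (the table and column names are hypothetical), the same query might be formatted as follows with the setting disabled and enabled:

```sql
-- format_alter_operations_with_parentheses = 0 (default)
ALTER TABLE t ADD COLUMN c1 UInt64, DROP COLUMN c2

-- format_alter_operations_with_parentheses = 1
ALTER TABLE t (ADD COLUMN c1 UInt64), (DROP COLUMN c2)
```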

View File

@ -5345,6 +5345,24 @@ SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as
Default value: `false`.
## default_normal_view_sql_security {#default_normal_view_sql_security}
Allows setting the default `SQL SECURITY` option when creating a normal view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `INVOKER`.
## default_materialized_view_sql_security {#default_materialized_view_sql_security}
Allows setting the default `SQL SECURITY` option when creating a materialized view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `DEFINER`.
## default_view_definer {#default_view_definer}
Allows setting the default `DEFINER` option when creating a view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `CURRENT_USER`.
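As a sketch of how these defaults interact (the view names are hypothetical), a view created without explicit clauses picks up the configured defaults, while explicit clauses override them:

```sql
-- Uses default_view_definer and default_normal_view_sql_security
CREATE VIEW v_defaults AS SELECT 1;

-- Overrides the defaults explicitly
CREATE VIEW v_explicit
DEFINER = CURRENT_USER SQL SECURITY INVOKER
AS SELECT 1;
```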
## max_partition_size_to_drop
Restriction on dropping partitions at query time. The value 0 means that you can drop partitions without any restrictions.

View File

@ -13,7 +13,9 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia
Syntax:
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
@ -52,7 +54,9 @@ SELECT * FROM view(column1=value1, column2=value2 ...)
## Materialized View
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```
:::tip
@ -91,6 +95,49 @@ Views look the same as normal tables. For example, they are listed in the result
To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view), although `DROP TABLE` works for views as well.
## SQL security {#sql_security}
`DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query.
`SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause.
The following table explains which rights are required for which user in order to select from a view.
Note that, regardless of the SQL security option, it is still required to have `GRANT SELECT ON <view>` in order to read from it.
| SQL security option | View | Materialized View |
|---------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
| `DEFINER alice` | `alice` must have a `SELECT` grant for the view's source table. | `alice` must have a `SELECT` grant for the view's source table and an `INSERT` grant for the view's target table. |
| `INVOKER` | User must have a `SELECT` grant for the view's source table. | `SQL SECURITY INVOKER` can't be specified for materialized views. |
| `NONE` | - | - |
:::note
`SQL SECURITY NONE` is a deprecated option. Any user with the rights to create views with `SQL SECURITY NONE` will be able to execute arbitrary queries.
Thus, it is required to have `GRANT ALLOW SQL SECURITY NONE TO <user>` in order to create a view with this option.
:::
If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used:
- `SQL SECURITY`: `INVOKER` for normal views and `DEFINER` for materialized views ([configurable by settings](../../../operations/settings/settings.md#default_normal_view_sql_security))
- `DEFINER`: `CURRENT_USER` ([configurable by settings](../../../operations/settings/settings.md#default_view_definer))
If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view.
To change SQL security for an existing view, use
```sql
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
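For example, reusing the `test_view` name from the examples below, switching an existing view to invoker rights could look like:

```sql
ALTER TABLE test_view MODIFY SQL SECURITY INVOKER
```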
### SQL security examples
```sql
CREATE VIEW test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```
```sql
CREATE VIEW test_view
SQL SECURITY INVOKER
AS SELECT ...
```
## Live View [Deprecated]
This feature is deprecated and will be removed in the future.

View File

@ -114,6 +114,7 @@ Hierarchy of privileges:
- `ALTER VIEW`
- `ALTER VIEW REFRESH`
- `ALTER VIEW MODIFY QUERY`
- `ALTER VIEW MODIFY SQL SECURITY`
- [CREATE](#grant-create)
- `CREATE DATABASE`
- `CREATE TABLE`
@ -307,6 +308,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries
- `ALTER VIEW` Level: `GROUP`
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`
- `ALTER VIEW MODIFY SQL SECURITY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY SQL SECURITY`
Examples of how this hierarchy is treated:
@ -409,6 +411,7 @@ Allows a user to execute queries that manage users, roles and row policies.
- `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
- `ALLOW SQL SECURITY NONE`. Level: `GLOBAL`. Aliases: `CREATE SQL SECURITY NONE`, `SQL SECURITY NONE`, `SECURITY NONE`
The `ROLE ADMIN` privilege allows a user to assign and revoke any roles including those which are not assigned to the user with the admin option.
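As a usage sketch for the `ALLOW SQL SECURITY NONE` privilege added above (the user name `alice` is hypothetical), the grant follows the form mentioned in the CREATE VIEW documentation:

```sql
GRANT ALLOW SQL SECURITY NONE TO alice
```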

View File

@ -11,7 +11,9 @@ sidebar_label: "Представление"
## Normal views {#normal}
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```
Normal views do not store any data; they read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
@ -37,7 +39,9 @@ SELECT a, b, c FROM (SELECT ...)
## Materialized views {#materialized}
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
```
Materialized views store data transformed by the corresponding [SELECT](../../../sql-reference/statements/select/index.md) query.
@ -66,6 +70,52 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na
To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view), although `DROP TABLE` also works for views.
## SQL security {#sql_security}
The `DEFINER` and `SQL SECURITY` parameters let you specify which user the queries against the tables referenced by the view are executed as.
`SQL SECURITY` accepts three values: `DEFINER`, `INVOKER`, or `NONE`.
`DEFINER` can be set to the name of any existing user or to `CURRENT_USER`.
The table below explains which rights are required for which user with the given SQL security settings.
Note that, regardless of the SQL security settings,
the user must have `GRANT SELECT ON <view>` to read from the view.
| SQL security option | View | Materialized View |
|---------------------|------|-------------------|
| `DEFINER alice` | `alice` must have a `SELECT` grant for the source table. | `alice` must have a `SELECT` grant for the source table and an `INSERT` grant for the target table. |
| `INVOKER` | The user querying the view must have a `SELECT` grant for the source table. | `SQL SECURITY INVOKER` cannot be specified for materialized views. |
| `NONE` | - | - |
:::note
`SQL SECURITY NONE` is not safe to use. Any user with the right to create views with `SQL SECURITY NONE` will be able to execute arbitrary queries without permission checks.
By default, users are not allowed to specify `SQL SECURITY NONE`; if necessary, this right can be granted with `GRANT ALLOW SQL SECURITY NONE TO <user>`.
:::
If `DEFINER`/`SQL SECURITY` are not specified, the default values are used:
- `SQL SECURITY`: `INVOKER` for normal views and `DEFINER` for materialized views ([configurable in settings](../../../operations/settings/settings.md#default_normal_view_sql_security))
- `DEFINER`: `CURRENT_USER` ([configurable in settings](../../../operations/settings/settings.md#default_view_definer))
If a view is attached with the `ATTACH` keyword and the SQL security settings were not specified,
the default is `SQL SECURITY NONE` for materialized views and `SQL SECURITY INVOKER` for normal views.
The SQL security settings of an existing view can be changed with the following query:
```sql
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
### Examples of views with SQL security
```sql
CREATE VIEW test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```
```sql
CREATE VIEW test_view
SQL SECURITY INVOKER
AS SELECT ...
```
## LIVE views [experimental] {#live-view}
:::note Important

View File

@ -330,6 +330,7 @@ try
processConfig();
adjustSettings();
initTTYBuffer(toProgressOption(config().getString("progress", "default")));
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
{
// All that just to set DB::CurrentThread::get().getGlobalContext()

View File

@ -91,8 +91,8 @@ static std::vector<std::string> extractFromConfig(
zkutil::validateZooKeeperConfig(*bootstrap_configuration);
zkutil::ZooKeeperPtr zookeeper = std::make_shared<zkutil::ZooKeeper>(
*bootstrap_configuration, bootstrap_configuration->has("zookeeper") ? "zookeeper" : "keeper", nullptr);
zkutil::ZooKeeperPtr zookeeper = zkutil::ZooKeeper::createWithoutKillingPreviousSessions(
*bootstrap_configuration, bootstrap_configuration->has("zookeeper") ? "zookeeper" : "keeper");
zkutil::ZooKeeperNodeCache zk_node_cache([&] { return zookeeper; });
config_xml = processor.processConfig(&has_zk_includes, &zk_node_cache);

View File

@ -400,7 +400,7 @@ int KeeperClient::main(const std::vector<String> & /* args */)
zk_args.connection_timeout_ms = config().getInt("connection-timeout", 10) * 1000;
zk_args.session_timeout_ms = config().getInt("session-timeout", 10) * 1000;
zk_args.operation_timeout_ms = config().getInt("operation-timeout", 10) * 1000;
zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_args);
zookeeper = zkutil::ZooKeeper::createWithoutKillingPreviousSessions(zk_args);
if (config().has("no-confirmation") || config().has("query"))
ask_confirmation = false;

View File

@ -506,6 +506,7 @@ try
processConfig();
adjustSettings();
initTTYBuffer(toProgressOption(config().getString("progress", "default")));
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
applyCmdSettings(global_context);

View File

@ -623,6 +623,8 @@ try
ServerSettings server_settings;
server_settings.loadSettingsFromConfig(config());
ASTAlterCommand::setFormatAlterCommandsWithParentheses(server_settings.format_alter_operations_with_parentheses);
StackTrace::setShowAddresses(server_settings.show_addresses_in_stack_traces);
#if USE_HDFS

View File

@ -103,6 +103,7 @@ namespace
const Flags & getColumnFlags() const { return all_flags_for_target[COLUMN]; }
const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; }
const Flags & getNamedCollectionFlags() const { return all_flags_for_target[NAMED_COLLECTION]; }
const Flags & getUserNameFlags() const { return all_flags_for_target[USER_NAME]; }
const Flags & getAllFlagsGrantableOnGlobalLevel() const { return getAllFlags(); }
const Flags & getAllFlagsGrantableOnGlobalWithParameterLevel() const { return getGlobalWithParameterFlags(); }
const Flags & getAllFlagsGrantableOnDatabaseLevel() const { return all_flags_grantable_on_database_level; }
@ -121,6 +122,7 @@ namespace
COLUMN,
DICTIONARY,
NAMED_COLLECTION,
USER_NAME,
};
struct Node;
@ -300,7 +302,7 @@ namespace
collectAllFlags(child.get());
all_flags_grantable_on_table_level = all_flags_for_target[TABLE] | all_flags_for_target[DICTIONARY] | all_flags_for_target[COLUMN];
all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION];
all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION] | all_flags_for_target[USER_NAME];
all_flags_grantable_on_database_level = all_flags_for_target[DATABASE] | all_flags_grantable_on_table_level;
}
@ -351,7 +353,7 @@ namespace
std::unordered_map<std::string_view, Flags> keyword_to_flags_map;
std::vector<Flags> access_type_to_flags_mapping;
Flags all_flags;
Flags all_flags_for_target[static_cast<size_t>(NAMED_COLLECTION) + 1];
Flags all_flags_for_target[static_cast<size_t>(USER_NAME) + 1];
Flags all_flags_grantable_on_database_level;
Flags all_flags_grantable_on_table_level;
Flags all_flags_grantable_on_global_with_parameter_level;
@ -371,7 +373,11 @@ std::unordered_map<AccessFlags::ParameterType, AccessFlags> AccessFlags::splitIn
if (named_collection_flags)
result.emplace(ParameterType::NAMED_COLLECTION, named_collection_flags);
auto other_flags = (~AccessFlags::allNamedCollectionFlags()) & *this;
auto user_flags = AccessFlags::allUserNameFlags() & *this;
if (user_flags)
result.emplace(ParameterType::USER_NAME, user_flags);
auto other_flags = (~named_collection_flags & ~user_flags) & *this;
if (other_flags)
result.emplace(ParameterType::NONE, other_flags);
@ -387,6 +393,9 @@ AccessFlags::ParameterType AccessFlags::getParameterType() const
if (AccessFlags::allNamedCollectionFlags().contains(*this))
return AccessFlags::NAMED_COLLECTION;
if (AccessFlags::allUserNameFlags().contains(*this))
return AccessFlags::USER_NAME;
throw Exception(ErrorCodes::MIXED_ACCESS_PARAMETER_TYPES, "Having mixed parameter types: {}", toString());
}
@ -405,6 +414,7 @@ AccessFlags AccessFlags::allTableFlags() { return Helper::instance().getTableFla
AccessFlags AccessFlags::allColumnFlags() { return Helper::instance().getColumnFlags(); }
AccessFlags AccessFlags::allDictionaryFlags() { return Helper::instance().getDictionaryFlags(); }
AccessFlags AccessFlags::allNamedCollectionFlags() { return Helper::instance().getNamedCollectionFlags(); }
AccessFlags AccessFlags::allUserNameFlags() { return Helper::instance().getUserNameFlags(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalWithParameterLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalWithParameterLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnDatabaseLevel() { return Helper::instance().getAllFlagsGrantableOnDatabaseLevel(); }

View File

@ -57,6 +57,7 @@ public:
{
NONE,
NAMED_COLLECTION,
USER_NAME,
};
ParameterType getParameterType() const;
std::unordered_map<ParameterType, AccessFlags> splitIntoParameterTypes() const;
@ -103,6 +104,9 @@ public:
/// Returns all the flags related to a named collection.
static AccessFlags allNamedCollectionFlags();
/// Returns all the flags related to a user.
static AccessFlags allUserNameFlags();
/// Returns all the flags which could be granted on the global level.
/// The same as allFlags().
static AccessFlags allFlagsGrantableOnGlobalLevel();

View File

@ -12,7 +12,7 @@ enum class AccessType
/// Macro M should be defined as M(name, aliases, node_type, parent_group_name)
/// where name is identifier with underscores (instead of spaces);
/// aliases is a string containing comma-separated list;
/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/USER_NAME/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
/// or specifies that the access type is a GROUP of other access types;
/// parent_group_name is the name of the group containing this access type (or NONE if there is no such group).
/// NOTE A parent group must be declared AFTER all its children.
@ -83,6 +83,7 @@ enum class AccessType
M(ALTER_VIEW_REFRESH, "ALTER LIVE VIEW REFRESH, REFRESH VIEW", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_REFRESH, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_SQL_SECURITY, "ALTER TABLE MODIFY SQL SECURITY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY, ALTER VIEW MODIFY REFRESH;
implicitly enabled by the grant ALTER_TABLE */\
\
@ -139,6 +140,7 @@ enum class AccessType
M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(ALLOW_SQL_SECURITY_NONE, "CREATE SQL SECURITY NONE, ALLOW SQL SECURITY NONE, SQL SECURITY NONE, SECURITY NONE", GLOBAL, ACCESS_MANAGEMENT) \
M(SHOW_USERS, "SHOW CREATE USER", GLOBAL, SHOW_ACCESS) \
M(SHOW_ROLES, "SHOW CREATE ROLE", GLOBAL, SHOW_ACCESS) \
M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
@ -150,6 +152,7 @@ enum class AccessType
M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
M(NAMED_COLLECTION, "NAMED COLLECTION USAGE, USE NAMED COLLECTION", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
M(SET_DEFINER, "", USER_NAME, ALL) \
\
M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \

View File

@ -0,0 +1,9 @@
#pragma once
#include <Core/Types.h>
enum class SQLSecurityType
{
INVOKER,
DEFINER,
NONE,
};

View File

@ -53,7 +53,8 @@ TEST(AccessRights, Union)
"SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, "
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1");
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, "
"GRANT SET DEFINER ON db1, GRANT NAMED COLLECTION ADMIN ON db1");
}

View File

@ -20,6 +20,9 @@ struct BackupOperationInfo
/// Base Backup Operation name, a string like "Disk('backups', 'my_base_backup')"
String base_backup_name;
/// Query ID of a query that started backup
String query_id;
/// This operation is internal and should not be shown in system.backups
bool internal = false;

View File

@ -440,7 +440,13 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context
try
{
addInfo(backup_id, backup_name_for_logging, base_backup_name, backup_settings.internal, context->getProcessListElement(), BackupStatus::CREATING_BACKUP);
addInfo(backup_id,
backup_name_for_logging,
base_backup_name,
context->getCurrentQueryId(),
backup_settings.internal,
context->getProcessListElement(),
BackupStatus::CREATING_BACKUP);
/// Prepare context to use.
ContextPtr context_in_use = context;
@ -823,7 +829,13 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt
if (restore_settings.base_backup_info)
base_backup_name = restore_settings.base_backup_info->toStringForLogging();
addInfo(restore_id, backup_name_for_logging, base_backup_name, restore_settings.internal, context->getProcessListElement(), BackupStatus::RESTORING);
addInfo(restore_id,
backup_name_for_logging,
base_backup_name,
context->getCurrentQueryId(),
restore_settings.internal,
context->getProcessListElement(),
BackupStatus::RESTORING);
/// Prepare context to use.
ContextMutablePtr context_in_use = context;
@ -1108,13 +1120,15 @@ void BackupsWorker::restoreTablesData(const OperationID & restore_id, BackupPtr
}
void BackupsWorker::addInfo(const OperationID & id, const String & name, const String & base_backup_name, bool internal, QueryStatusPtr process_list_element, BackupStatus status)
void BackupsWorker::addInfo(const OperationID & id, const String & name, const String & base_backup_name, const String & query_id,
bool internal, QueryStatusPtr process_list_element, BackupStatus status)
{
ExtendedOperationInfo extended_info;
auto & info = extended_info.info;
info.id = id;
info.name = name;
info.base_backup_name = base_backup_name;
info.query_id = query_id;
info.internal = internal;
info.status = status;
info.start_time = std::chrono::system_clock::now();
@ -1183,7 +1197,7 @@ void BackupsWorker::setStatus(const String & id, BackupStatus status, bool throw
if (isFailedOrCancelled(status))
{
info.error_message = getCurrentExceptionMessage(false);
info.error_message = getCurrentExceptionMessage(true /*with_stacktrace*/);
info.exception = std::current_exception();
}

View File

@ -108,7 +108,8 @@ private:
/// Run data restoring tasks which insert data to tables.
void restoreTablesData(const BackupOperationID & restore_id, BackupPtr backup, DataRestoreTasks && tasks, ThreadPool & thread_pool, QueryStatusPtr process_list_element);
void addInfo(const BackupOperationID & id, const String & name, const String & base_backup_name, bool internal, QueryStatusPtr process_list_element, BackupStatus status);
void addInfo(const BackupOperationID & id, const String & name, const String & base_backup_name, const String & query_id,
bool internal, QueryStatusPtr process_list_element, BackupStatus status);
void setStatus(const BackupOperationID & id, BackupStatus status, bool throw_if_error = true);
void setStatusSafe(const String & id, BackupStatus status) { setStatus(id, status, false); }
void setNumFilesAndSize(const BackupOperationID & id, size_t num_files, UInt64 total_size, size_t num_entries,

View File

@ -16,6 +16,7 @@
#include <base/find_symbols.h>
#include <base/sort.h>
#include <base/getFQDNOrHostName.h>
#include <Core/ServerUUID.h>
#include "Common/ZooKeeper/IKeeper.h"
#include <Common/DNSResolver.h>
#include <Common/StringUtils/StringUtils.h>
@ -375,7 +376,7 @@ void ZooKeeper::createAncestors(const std::string & path)
}
Coordination::Responses responses;
Coordination::Error code = multiImpl(create_ops, responses);
Coordination::Error code = multiImpl(create_ops, responses, /*check_session_valid*/ false);
if (code == Coordination::Error::ZOK)
return;
@ -638,12 +639,22 @@ Coordination::Error ZooKeeper::trySet(const std::string & path, const std::strin
}
Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses)
Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
if (requests.empty())
return Coordination::Error::ZOK;
auto future_result = asyncTryMultiNoThrow(requests);
std::future<Coordination::MultiResponse> future_result;
if (check_session_valid)
{
Coordination::Requests new_requests = requests;
addCheckSessionOp(new_requests);
future_result = asyncTryMultiNoThrow(new_requests);
}
else
{
future_result = asyncTryMultiNoThrow(requests);
}
if (future_result.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
@ -655,21 +666,35 @@ Coordination::Error ZooKeeper::multiImpl(const Coordination::Requests & requests
auto response = future_result.get();
Coordination::Error code = response.error;
responses = response.responses;
if (check_session_valid)
{
if (code != Coordination::Error::ZOK && !Coordination::isHardwareError(code) && getFailedOpIndex(code, responses) == requests.size())
{
impl->finalize(fmt::format("Session was killed: {}", requests.back()->getPath()));
code = Coordination::Error::ZSESSIONMOVED;
}
responses.pop_back();
/// For some reason, for hardware errors we set ZOK codes for all responses.
/// In other cases, if the multi-request status is not ZOK, then the last response status must indicate an error too
chassert(code == Coordination::Error::ZOK || Coordination::isHardwareError(code) || responses.back()->error != Coordination::Error::ZOK);
}
return code;
}
}
Coordination::Responses ZooKeeper::multi(const Coordination::Requests & requests)
Coordination::Responses ZooKeeper::multi(const Coordination::Requests & requests, bool check_session_valid)
{
Coordination::Responses responses;
Coordination::Error code = multiImpl(requests, responses);
Coordination::Error code = multiImpl(requests, responses, check_session_valid);
KeeperMultiException::check(code, requests, responses);
return responses;
}
Coordination::Error ZooKeeper::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses)
Coordination::Error ZooKeeper::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
Coordination::Error code = multiImpl(requests, responses);
Coordination::Error code = multiImpl(requests, responses, check_session_valid);
if (code != Coordination::Error::ZOK && !Coordination::isUserError(code))
throw KeeperException(code);
return code;
@ -935,12 +960,40 @@ Coordination::ReconfigResponse ZooKeeper::reconfig(
return future_result.get();
}
ZooKeeperPtr ZooKeeper::create(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr<DB::ZooKeeperLog> zk_log_)
{
auto res = std::shared_ptr<ZooKeeper>(new ZooKeeper(config, config_name, zk_log_));
res->initSession();
return res;
}
ZooKeeperPtr ZooKeeper::startNewSession() const
{
return std::make_shared<ZooKeeper>(args, zk_log);
auto res = std::shared_ptr<ZooKeeper>(new ZooKeeper(args, zk_log));
res->initSession();
return res;
}
void ZooKeeper::initSession()
{
String session_path = fs::path(args.sessions_path) / args.zookeeper_name / toString(DB::ServerUUID::get());
Coordination::Stat stat;
if (trySet(session_path, "", -1, &stat) == Coordination::Error::ZOK)
{
session_node_version = stat.version;
return;
}
createAncestors(session_path);
create(session_path, "", zkutil::CreateMode::Persistent);
session_node_version = 0;
}
void ZooKeeper::addCheckSessionOp(Coordination::Requests & requests) const
{
String session_path = fs::path(args.sessions_path) / args.zookeeper_name / toString(DB::ServerUUID::get());
requests.push_back(zkutil::makeCheckRequest(session_path, session_node_version));
}
bool ZooKeeper::expired()
{
@ -1243,11 +1296,11 @@ std::future<Coordination::MultiResponse> ZooKeeper::asyncMulti(const Coordinatio
return future;
}
Coordination::Error ZooKeeper::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses)
Coordination::Error ZooKeeper::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
try
{
return multiImpl(requests, responses);
return multiImpl(requests, responses, check_session_valid);
}
catch (const Coordination::Exception & e)
{

View File

@ -198,11 +198,6 @@ class ZooKeeper
/// ZooKeeperWithFaultInjection wants access to `impl` pointer to reimplement some async functions with faults
friend class DB::ZooKeeperWithFaultInjection;
public:
using Ptr = std::shared_ptr<ZooKeeper>;
using ErrorsList = std::initializer_list<Coordination::Error>;
explicit ZooKeeper(const ZooKeeperArgs & args_, std::shared_ptr<DB::ZooKeeperLog> zk_log_ = nullptr);
/** Config of the form:
@ -227,10 +222,27 @@ public:
<identity>user:password</identity>
</zookeeper>
*/
ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr<DB::ZooKeeperLog> zk_log_);
ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std::string & config_name, std::shared_ptr<DB::ZooKeeperLog> zk_log_ = nullptr);
/// See addCheckSessionOp
void initSession();
public:
using Ptr = std::shared_ptr<ZooKeeper>;
using ErrorsList = std::initializer_list<Coordination::Error>;
std::vector<ShuffleHost> shuffleHosts() const;
static Ptr create(const Poco::Util::AbstractConfiguration & config,
const std::string & config_name,
std::shared_ptr<DB::ZooKeeperLog> zk_log_);
template <typename... Args>
static Ptr createWithoutKillingPreviousSessions(Args &&... args)
{
return std::shared_ptr<ZooKeeper>(new ZooKeeper(std::forward<Args>(args)...));
}
/// Creates a new session with the same parameters. This method can be used for reconnecting
/// after the session has expired.
/// This object remains unchanged, and the new session is returned.
@ -427,13 +439,14 @@ public:
/// Performs several operations in a transaction.
/// Throws on every error.
Coordination::Responses multi(const Coordination::Requests & requests);
/// For check_session_valid see addCheckSessionOp
Coordination::Responses multi(const Coordination::Requests & requests, bool check_session_valid = false);
/// Throws only if some operation has returned an "unexpected" error - an error that would cause
/// the corresponding try- method to throw.
/// On exception, `responses` may or may not be populated.
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid = false);
/// Throws nothing (even session expired errors)
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid = false);
std::string sync(const std::string & path);
@ -587,6 +600,22 @@ public:
const DB::KeeperFeatureFlags * getKeeperFeatureFlags() const { return impl->getKeeperFeatureFlags(); }
/// Checks that our session was not killed, and helps to avoid applying a request from an old lost session.
/// Imagine a "connection-loss-on-commit" situation like this:
/// - We have written some write requests to the socket and immediately disconnected (e.g. due to "Operation timeout")
/// - The requests were sent, but the destination [Zoo]Keeper host will receive it later (it doesn't know about our requests yet)
/// - We don't know the status of our requests
/// - We connect to another [Zoo]Keeper replica with a new session, and do some reads
/// to find out the status of our requests. We see that they were not committed.
/// - The packets from our old session finally arrive at the destination [Zoo]Keeper host. The requests get processed.
/// - Changes are committed (although we have already seen that they are not)
///
/// We need a way to reject requests from old sessions somehow.
///
/// So we update the version of /clickhouse/sessions/server_uuid node when starting a new session.
/// And there's an option to check this version when committing something.
void addCheckSessionOp(Coordination::Requests & requests) const;
private:
void init(ZooKeeperArgs args_);
@ -602,7 +631,7 @@ private:
Coordination::Stat * stat,
Coordination::WatchCallbackPtr watch_callback,
Coordination::ListRequestType list_request_type);
Coordination::Error multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error multiImpl(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid);
Coordination::Error existsImpl(const std::string & path, Coordination::Stat * stat_, Coordination::WatchCallback watch_callback);
Coordination::Error syncImpl(const std::string & path, std::string & returned_path);
@ -654,6 +683,8 @@ private:
std::shared_ptr<DB::ZooKeeperLog> zk_log;
AtomicStopwatch session_uptime;
int32_t session_node_version;
};

View File

@ -132,6 +132,8 @@ void ZooKeeperArgs::initFromKeeperServerSection(const Poco::Util::AbstractConfig
void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguration & config, const std::string & config_name)
{
zookeeper_name = config_name;
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_name, keys);
@ -193,6 +195,10 @@ void ZooKeeperArgs::initFromKeeperSection(const Poco::Util::AbstractConfiguratio
{
chroot = config.getString(config_name + "." + key);
}
else if (key == "sessions_path")
{
sessions_path = config.getString(config_name + "." + key);
}
else if (key == "implementation")
{
implementation = config.getString(config_name + "." + key);

View File

@ -29,11 +29,13 @@ struct ZooKeeperArgs
ZooKeeperArgs() = default;
bool operator == (const ZooKeeperArgs &) const = default;
String zookeeper_name = "zookeeper";
String implementation = "zookeeper";
Strings hosts;
String auth_scheme;
String identity;
String chroot;
String sessions_path = "/clickhouse/sessions";
int32_t connection_timeout_ms = Coordination::DEFAULT_CONNECTION_TIMEOUT_MS;
int32_t session_timeout_ms = Coordination::DEFAULT_SESSION_TIMEOUT_MS;
int32_t operation_timeout_ms = Coordination::DEFAULT_OPERATION_TIMEOUT_MS;

View File

@ -401,9 +401,6 @@ ZooKeeper::ZooKeeper(
keeper_feature_flags.logFlags(log);
ProfileEvents::increment(ProfileEvents::ZooKeeperInit);
/// Avoid stale reads after connecting
sync("/", [](const SyncResponse &){});
}
catch (...)
{

View File

@ -336,14 +336,14 @@ Coordination::Error ZooKeeperWithFaultInjection::tryCreate(const std::string & p
return tryCreate(path, data, mode, path_created);
}
Coordination::Responses ZooKeeperWithFaultInjection::multi(const Coordination::Requests & requests)
Coordination::Responses ZooKeeperWithFaultInjection::multi(const Coordination::Requests & requests, bool check_session_valid)
{
return executeWithFaultSync(
__func__,
!requests.empty() ? requests.front()->getPath() : "",
[&]()
{
auto responses = keeper->multi(requests);
auto responses = keeper->multi(requests, check_session_valid);
if (unlikely(fault_policy))
multiResponseSaveEphemeralNodePaths(requests, responses);
return responses;
@ -420,14 +420,14 @@ void ZooKeeperWithFaultInjection::deleteEphemeralNodeIfContentMatches(
__func__, path, [&]() { return keeper->deleteEphemeralNodeIfContentMatches(path, fast_delete_if_equal_value); });
}
Coordination::Error ZooKeeperWithFaultInjection::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses)
Coordination::Error ZooKeeperWithFaultInjection::tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
return executeWithFaultSync(
__func__,
!requests.empty() ? requests.front()->getPath() : "",
[&]()
{
auto code = keeper->tryMulti(requests, responses);
auto code = keeper->tryMulti(requests, responses, check_session_valid);
if (unlikely(fault_policy) && code == Coordination::Error::ZOK)
multiResponseSaveEphemeralNodePaths(requests, responses);
return code;
@ -435,11 +435,11 @@ Coordination::Error ZooKeeperWithFaultInjection::tryMulti(const Coordination::Re
}
Coordination::Error
ZooKeeperWithFaultInjection::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses)
ZooKeeperWithFaultInjection::tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid)
{
try
{
return tryMulti(requests, responses);
return tryMulti(requests, responses, check_session_valid);
}
catch (const Coordination::Exception & e)
{

View File

@ -212,7 +212,7 @@ public:
Coordination::Error tryCreate(const std::string & path, const std::string & data, int32_t mode);
Coordination::Responses multi(const Coordination::Requests & requests);
Coordination::Responses multi(const Coordination::Requests & requests, bool check_session_valid = false);
void createIfNotExists(const std::string & path, const std::string & data);
@ -242,9 +242,9 @@ public:
void deleteEphemeralNodeIfContentMatches(const std::string & path, const std::string & fast_delete_if_equal_value);
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error tryMulti(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid = false);
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses);
Coordination::Error tryMultiNoThrow(const Coordination::Requests & requests, Coordination::Responses & responses, bool check_session_valid = false);
///
/// mirror ZooKeeper interface: Async functions

View File

@ -5,9 +5,9 @@
int main(int argc, char ** argv)
try
{
zkutil::ZooKeeper zookeeper{zkutil::ZooKeeperArgs("localhost:2181")};
auto zookeeper = zkutil::ZooKeeper::createWithoutKillingPreviousSessions(zkutil::ZooKeeperArgs("localhost:2181"));
auto nodes = zookeeper.getChildren("/tmp");
auto nodes = zookeeper->getChildren("/tmp");
if (argc < 2)
{
@ -26,7 +26,7 @@ try
std::vector<std::future<Coordination::GetResponse>> futures;
futures.reserve(nodes.size());
for (auto & node : nodes)
futures.push_back(zookeeper.asyncGet("/tmp/" + node));
futures.push_back(zookeeper->asyncGet("/tmp/" + node));
for (auto & future : futures)
std::cerr << (future.get().data.empty() ? ',' : '.');

View File

@ -16,34 +16,34 @@ try
return 1;
}
ZooKeeper zk{zkutil::ZooKeeperArgs(argv[1])};
auto zk = ZooKeeper::createWithoutKillingPreviousSessions(zkutil::ZooKeeperArgs(argv[1]));
std::cout << "create path" << std::endl;
zk.create("/test", "old", zkutil::CreateMode::Persistent);
zk->create("/test", "old", zkutil::CreateMode::Persistent);
Coordination::Stat stat;
zkutil::EventPtr watch = std::make_shared<Poco::Event>();
std::cout << "get path" << std::endl;
zk.get("/test", &stat, watch);
zk->get("/test", &stat, watch);
std::cout << "set path" << std::endl;
zk.set("/test", "new");
zk->set("/test", "new");
watch->wait();
std::cout << "watch happened" << std::endl;
std::cout << "remove path" << std::endl;
std::cout << "list path" << std::endl;
Strings children = zk.getChildren("/");
Strings children = zk->getChildren("/");
for (const auto & name : children)
std::cerr << "\t" << name << "\n";
zk.remove("/test");
zk->remove("/test");
Coordination::Requests ops;
ops.emplace_back(zkutil::makeCreateRequest("/test", "multi1", CreateMode::Persistent));
ops.emplace_back(zkutil::makeSetRequest("/test", "multi2", -1));
ops.emplace_back(zkutil::makeRemoveRequest("/test", -1));
std::cout << "multi" << std::endl;
Coordination::Responses res = zk.multi(ops);
Coordination::Responses res = zk->multi(ops);
std::cout << "path created: " << dynamic_cast<const Coordination::CreateResponse &>(*res[0]).path_created << std::endl;
return 0;

View File

@ -20,7 +20,6 @@
#include <Core/Field.h>
#include <Disks/DiskLocal.h>
namespace DB
{
@ -99,9 +98,12 @@ namespace
void readNode(KeeperStorage::Node & node, ReadBuffer & in, SnapshotVersion version, ACLMap & acl_map)
{
String new_data;
readBinary(new_data, in);
node.setData(new_data);
readVarUInt(node.data_size, in);
if (node.data_size != 0)
{
node.data = std::unique_ptr<char[]>(new char[node.data_size]);
in.readStrict(node.data.get(), node.data_size);
}
if (version >= SnapshotVersion::V1)
{
@ -373,25 +375,36 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
size_t snapshot_container_size;
readBinary(snapshot_container_size, in);
storage.container.reserve(snapshot_container_size);
if (recalculate_digest)
storage.nodes_digest = 0;
for (size_t nodes_read = 0; nodes_read < snapshot_container_size; ++nodes_read)
{
std::string path;
readBinary(path, in);
size_t path_size = 0;
readVarUInt(path_size, in);
chassert(path_size != 0);
auto path_data = storage.container.allocateKey(path_size);
in.readStrict(path_data.get(), path_size);
std::string_view path{path_data.get(), path_size};
KeeperStorage::Node node{};
readNode(node, in, current_version, storage.acl_map);
using enum Coordination::PathMatchResult;
auto match_result = Coordination::matchPath(path, keeper_system_path);
const std::string error_msg = fmt::format("Cannot read node on path {} from a snapshot because it is used as a system node", path);
const auto get_error_msg = [&]
{
return fmt::format("Cannot read node on path {} from a snapshot because it is used as a system node", path);
};
if (match_result == IS_CHILD)
{
if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
{
LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", error_msg);
LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", get_error_msg());
continue;
}
else
@ -399,7 +412,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
ErrorCodes::LOGICAL_ERROR,
"{}. Ignoring it can lead to data loss. "
"If you still want to ignore it, you can set 'keeper_server.ignore_system_path_on_startup' to true",
error_msg);
get_error_msg());
}
else if (match_result == EXACT)
{
@ -407,7 +420,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
{
if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
{
LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", error_msg);
LOG_ERROR(getLogger("KeeperSnapshotManager"), "{}. Ignoring it", get_error_msg());
node = KeeperStorage::Node{};
}
else
@ -415,18 +428,25 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
ErrorCodes::LOGICAL_ERROR,
"{}. Ignoring it can lead to data loss. "
"If you still want to ignore it, you can set 'keeper_server.ignore_system_path_on_startup' to true",
error_msg);
get_error_msg());
}
}
storage.container.insertOrReplace(path, node);
if (node.isEphemeral())
storage.ephemerals[node.ephemeralOwner()].insert(path);
auto ephemeral_owner = node.ephemeralOwner();
if (!node.isEphemeral() && node.numChildren() > 0)
node.getChildren().reserve(node.numChildren());
if (ephemeral_owner != 0)
storage.ephemerals[node.ephemeralOwner()].insert(std::string{path});
if (recalculate_digest)
storage.nodes_digest += node.getDigest(path);
storage.container.insertOrReplace(std::move(path_data), path_size, std::move(node));
}
LOG_TRACE(getLogger("KeeperSnapshotManager"), "Building structure for children nodes");
for (const auto & itr : storage.container)
{
if (itr.key != "/")

View File

@ -233,6 +233,38 @@ KeeperStorage::Node::Node(const Node & other)
*this = other;
}
KeeperStorage::Node & KeeperStorage::Node::operator=(Node && other) noexcept
{
if (this == &other)
return *this;
czxid = other.czxid;
mzxid = other.mzxid;
pzxid = other.pzxid;
acl_id = other.acl_id;
mtime = other.mtime;
is_ephemeral_and_ctime = other.is_ephemeral_and_ctime;
ephemeral_or_children_data = other.ephemeral_or_children_data;
version = other.version;
cversion = other.cversion;
aversion = other.aversion;
data_size = other.data_size;
data = std::move(other.data);
other.data_size = 0;
static_assert(std::is_nothrow_move_assignable_v<ChildrenSet>);
children = std::move(other.children);
return *this;
}
KeeperStorage::Node::Node(Node && other) noexcept
{
*this = std::move(other);
}
bool KeeperStorage::Node::empty() const
{
return data_size == 0 && mzxid == 0;

View File

@ -52,9 +52,11 @@ public:
Node() = default;
Node & operator=(const Node & other);
Node(const Node & other);
Node & operator=(Node && other) noexcept;
Node(Node && other) noexcept;
bool empty() const;
bool isEphemeral() const
@ -146,6 +148,7 @@ public:
void removeChild(StringRef child_path);
const auto & getChildren() const noexcept { return children; }
auto & getChildren() { return children; }
// Invalidate the calculated digest so it's recalculated again on the next
// getDigest call

View File

@ -169,6 +169,49 @@ private:
}
}
void insertOrReplace(StringRef key, V value, bool owns_key)
{
size_t hash_value = map.hash(key);
auto new_value_size = value.sizeInBytes();
auto it = map.find(key, hash_value);
uint64_t old_value_size = it == map.end() ? 0 : it->getMapped()->value.sizeInBytes();
if (it == map.end())
{
auto list_key = owns_key ? key : copyStringInArena(arena, key);
ListElem elem{list_key, std::move(value)};
elem.setVersion(current_version);
auto itr = list.insert(list.end(), std::move(elem));
bool inserted;
map.emplace(itr->key, it, inserted, hash_value);
itr->setActiveInMap();
chassert(inserted);
it->getMapped() = itr;
}
else
{
if (owns_key)
arena.free(key.data, key.size);
auto list_itr = it->getMapped();
if (snapshot_mode)
{
ListElem elem{list_itr->key, std::move(value)};
elem.setVersion(current_version);
list_itr->setInactiveInMap();
auto new_list_itr = list.insert(list.end(), std::move(elem));
it->getMapped() = new_list_itr;
snapshot_invalid_iters.push_back(list_itr);
}
else
{
list_itr->value = std::move(value);
}
}
updateDataSize(INSERT_OR_REPLACE, key.size, new_value_size, old_value_size, !snapshot_mode);
}
public:
using iterator = typename List::iterator;
@ -203,41 +246,39 @@ public:
return std::make_pair(it, false);
}
void insertOrReplace(const std::string & key, const V & value)
void reserve(size_t node_num)
{
size_t hash_value = map.hash(key);
auto it = map.find(key, hash_value);
uint64_t old_value_size = it == map.end() ? 0 : it->getMapped()->value.sizeInBytes();
map.reserve(node_num);
}
if (it == map.end())
void insertOrReplace(const std::string & key, V value)
{
insertOrReplace(key, std::move(value), /*owns_key*/ false);
}
struct KeyDeleter
{
void operator()(const char * key)
{
ListElem elem{copyStringInArena(arena, key), value};
elem.setVersion(current_version);
auto itr = list.insert(list.end(), std::move(elem));
bool inserted;
map.emplace(itr->key, it, inserted, hash_value);
itr->setActiveInMap();
chassert(inserted);
it->getMapped() = itr;
if (key)
arena->free(key, size);
}
else
{
auto list_itr = it->getMapped();
if (snapshot_mode)
{
ListElem elem{list_itr->key, value};
elem.setVersion(current_version);
list_itr->setInactiveInMap();
auto new_list_itr = list.insert(list.end(), std::move(elem));
it->getMapped() = new_list_itr;
snapshot_invalid_iters.push_back(list_itr);
}
else
{
list_itr->value = value;
}
}
updateDataSize(INSERT_OR_REPLACE, key.size(), value.sizeInBytes(), old_value_size, !snapshot_mode);
size_t size;
GlobalArena * arena;
};
using KeyPtr = std::unique_ptr<char[], KeyDeleter>;
KeyPtr allocateKey(size_t size)
{
return KeyPtr{new char[size], KeyDeleter{size, &arena}};
}
void insertOrReplace(KeyPtr key_data, size_t key_size, V value)
{
StringRef key{key_data.release(), key_size};
insertOrReplace(key, std::move(value), /*owns_key*/ true);
}
bool erase(const std::string & key)

View File

@ -115,6 +115,7 @@ namespace DB
M(Bool, storage_metadata_write_full_object_key, false, "Write disk metadata files with VERSION_FULL_OBJECT_KEY format", 0) \
M(UInt64, max_materialized_views_count_for_table, 0, "A limit on the number of materialized views attached to a table.", 0) \
M(UInt32, max_database_replicated_create_table_thread_pool_size, 1, "The number of threads to create tables during replica recovery in DatabaseReplicated. Zero means number of threads equal number of cores.", 0) \
M(Bool, format_alter_operations_with_parentheses, false, "If enabled, each operation in alter queries will be surrounded with parentheses in formatted queries to make them less ambiguous.", 0) \
M(String, default_replica_path, "/clickhouse/tables/{uuid}/{shard}", "The path to the table in ZooKeeper", 0) \
M(String, default_replica_name, "{replica}", "The replica name in ZooKeeper", 0) \

View File

@ -868,6 +868,9 @@ class IColumn;
M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \
M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \
M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0)\
M(SQLSecurityType, default_normal_view_sql_security, SQLSecurityType::INVOKER, "Allows to set a default value for SQL SECURITY option when creating a normal view.", 0) \
M(SQLSecurityType, default_materialized_view_sql_security, SQLSecurityType::DEFINER, "Allows to set a default value for SQL SECURITY option when creating a materialized view.", 0) \
M(String, default_view_definer, "CURRENT_USER", "Allows to set a default value for DEFINER option when creating view.", 0) \
M(Bool, allow_experimental_shared_merge_tree, false, "Only available in ClickHouse Cloud", 0) \
M(UInt64, cache_warmer_threads, 4, "Only available in ClickHouse Cloud", 0) \
M(Int64, ignore_cold_parts_seconds, 0, "Only available in ClickHouse Cloud", 0) \

View File

@ -103,7 +103,10 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."},
{"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."},
{"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."},
}},
{"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"},
{"default_view_definer", "", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"},
{"default_materialized_view_sql_security", "INVOKER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"},
{"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}}},
{"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
{"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
{"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},

View File

@ -1,5 +1,6 @@
#include <Core/SettingsEnums.h>
#include <magic_enum.hpp>
#include <Access/Common/SQLSecurityDefs.h>
namespace DB
@ -206,4 +207,9 @@ IMPLEMENT_SETTING_ENUM(DateTimeOverflowBehavior, ErrorCodes::BAD_ARGUMENTS,
{{"throw", FormatSettings::DateTimeOverflowBehavior::Throw},
{"ignore", FormatSettings::DateTimeOverflowBehavior::Ignore},
{"saturate", FormatSettings::DateTimeOverflowBehavior::Saturate}})
IMPLEMENT_SETTING_ENUM(SQLSecurityType, ErrorCodes::BAD_ARGUMENTS,
{{"DEFINER", SQLSecurityType::DEFINER},
{"INVOKER", SQLSecurityType::INVOKER},
{"NONE", SQLSecurityType::NONE}})
}

View File

@ -6,6 +6,7 @@
#include <Formats/FormatSettings.h>
#include <IO/ReadSettings.h>
#include <Common/ShellCommandSettings.h>
#include <Parsers/ASTSQLSecurity.h>
namespace DB
@ -266,4 +267,5 @@ DECLARE_SETTING_ENUM(SchemaInferenceMode)
DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateTimeOverflowBehavior)
DECLARE_SETTING_ENUM(SQLSecurityType)
}

View File

@ -746,7 +746,12 @@ namespace
break;
}
if (outcome.GetError().GetExceptionName() == "EntityTooLarge" || outcome.GetError().GetExceptionName() == "InvalidRequest" || outcome.GetError().GetExceptionName() == "InvalidArgument")
if (outcome.GetError().GetExceptionName() == "EntityTooLarge" ||
outcome.GetError().GetExceptionName() == "InvalidRequest" ||
outcome.GetError().GetExceptionName() == "InvalidArgument" ||
(outcome.GetError().GetExceptionName() == "InternalError" &&
outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::GATEWAY_TIMEOUT &&
outcome.GetError().GetMessage().contains("use the Rewrite method in the JSON API")))
{
if (!supports_multipart_copy)
{
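
Read on its own, the widened condition above is a predicate for falling back from a native multipart copy to a copy through a buffer when the storage rejects the request (GCS answers an S3-style multipart copy with a generic InternalError / 504 and points at its own Rewrite API). A hedged stand-alone sketch of that decision, with plain strings and an int in place of the AWS SDK error types and a hypothetical shouldFallBackToRegularCopy name:

#include <iostream>
#include <string>

/// Simplified view of the error returned by a failed CopyObject/UploadPartCopy.
struct CopyError
{
    std::string exception_name;
    int http_code = 0;
    std::string message;
};

/// True when the endpoint cannot serve a native (multipart) copy and the
/// caller should fall back to copying the object through a buffer.
bool shouldFallBackToRegularCopy(const CopyError & error)
{
    if (error.exception_name == "EntityTooLarge"
        || error.exception_name == "InvalidRequest"
        || error.exception_name == "InvalidArgument")
        return true;

    return error.exception_name == "InternalError"
        && error.http_code == 504
        && error.message.find("use the Rewrite method in the JSON API") != std::string::npos;
}

int main()
{
    CopyError gcs{"InternalError", 504, "Please use the Rewrite method in the JSON API"};
    CopyError other{"AccessDenied", 403, ""};
    std::cout << shouldFallBackToRegularCopy(gcs) << ' ' << shouldFallBackToRegularCopy(other) << '\n'; /// prints "1 0"
}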

View File

@ -261,7 +261,7 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
InterpreterInsertQuery interpreter(query, query_context, query_context->getSettingsRef().insert_allow_materialized_columns);
auto table = interpreter.getTable(insert_query);
auto sample_block = interpreter.getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr());
auto sample_block = interpreter.getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);
if (!FormatFactory::instance().isInputFormat(insert_query.format))
throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Unknown input format {}", insert_query.format);

View File

@ -28,6 +28,7 @@ ColumnsDescription BackupLogElement::getColumnsDescription()
{"id", std::make_shared<DataTypeString>()},
{"name", std::make_shared<DataTypeString>()},
{"base_backup_name", std::make_shared<DataTypeString>()},
{"query_id", std::make_shared<DataTypeString>()},
{"status", std::make_shared<DataTypeEnum8>(getBackupStatusEnumValues())},
{"error", std::make_shared<DataTypeString>()},
{"start_time", std::make_shared<DataTypeDateTime>()},
@ -51,6 +52,7 @@ void BackupLogElement::appendToBlock(MutableColumns & columns) const
columns[i++]->insert(info.id);
columns[i++]->insert(info.name);
columns[i++]->insert(info.base_backup_name);
columns[i++]->insert(info.query_id);
columns[i++]->insert(static_cast<Int8>(info.status));
columns[i++]->insert(info.error_message);
columns[i++]->insert(static_cast<UInt32>(std::chrono::system_clock::to_time_t(info.start_time)));

View File

@ -794,6 +794,7 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
{
auto res = std::shared_ptr<Context>(new Context);
res->shared = shared_part;
res->query_access_info = std::make_shared<QueryAccessInfo>();
return res;
}
@ -813,7 +814,9 @@ SharedContextHolder Context::createShared()
ContextMutablePtr Context::createCopy(const ContextPtr & other)
{
SharedLockGuard lock(other->mutex);
return std::shared_ptr<Context>(new Context(*other));
auto new_context = std::shared_ptr<Context>(new Context(*other));
new_context->query_access_info = std::make_shared<QueryAccessInfo>(*other->query_access_info);
return new_context;
}
ContextMutablePtr Context::createCopy(const ContextWeakPtr & other)
@ -1607,12 +1610,12 @@ void Context::addQueryAccessInfo(
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard lock(query_access_info.mutex);
query_access_info.databases.emplace(quoted_database_name);
query_access_info.tables.emplace(full_quoted_table_name);
std::lock_guard lock(query_access_info->mutex);
query_access_info->databases.emplace(quoted_database_name);
query_access_info->tables.emplace(full_quoted_table_name);
for (const auto & column_name : column_names)
query_access_info.columns.emplace(full_quoted_table_name + "." + backQuoteIfNeed(column_name));
query_access_info->columns.emplace(full_quoted_table_name + "." + backQuoteIfNeed(column_name));
}
void Context::addQueryAccessInfo(const Names & partition_names)
@ -1620,9 +1623,9 @@ void Context::addQueryAccessInfo(const Names & partition_names)
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info.mutex);
std::lock_guard<std::mutex> lock(query_access_info->mutex);
for (const auto & partition_name : partition_names)
query_access_info.partitions.emplace(partition_name);
query_access_info->partitions.emplace(partition_name);
}
void Context::addViewAccessInfo(const String & view_name)
@ -1630,8 +1633,8 @@ void Context::addViewAccessInfo(const String & view_name)
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info.mutex);
query_access_info.views.emplace(view_name);
std::lock_guard<std::mutex> lock(query_access_info->mutex);
query_access_info->views.emplace(view_name);
}
void Context::addQueryAccessInfo(const QualifiedProjectionName & qualified_projection_name)
@ -1642,8 +1645,8 @@ void Context::addQueryAccessInfo(const QualifiedProjectionName & qualified_proje
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info.mutex);
query_access_info.projections.emplace(fmt::format(
std::lock_guard<std::mutex> lock(query_access_info->mutex);
query_access_info->projections.emplace(fmt::format(
"{}.{}", qualified_projection_name.storage_id.getFullTableName(), backQuoteIfNeed(qualified_projection_name.projection_name)));
}
@ -2294,7 +2297,8 @@ void Context::setMacros(std::unique_ptr<Macros> && macros)
ContextMutablePtr Context::getQueryContext() const
{
auto ptr = query_context.lock();
if (!ptr) throw Exception(ErrorCodes::THERE_IS_NO_QUERY, "There is no query or query context has expired");
if (!ptr)
throw Exception(ErrorCodes::THERE_IS_NO_QUERY, "There is no query or query context has expired");
return ptr;
}
@ -3152,7 +3156,7 @@ zkutil::ZooKeeperPtr Context::getZooKeeper() const
const auto & config = shared->zookeeper_config ? *shared->zookeeper_config : getConfigRef();
if (!shared->zookeeper)
shared->zookeeper = std::make_shared<zkutil::ZooKeeper>(config, zkutil::getZooKeeperConfigName(config), getZooKeeperLog());
shared->zookeeper = zkutil::ZooKeeper::create(config, zkutil::getZooKeeperConfigName(config), getZooKeeperLog());
else if (shared->zookeeper->hasReachedDeadline())
shared->zookeeper->finalize("ZooKeeper session has reached its deadline");
@ -3364,7 +3368,7 @@ zkutil::ZooKeeperPtr Context::getAuxiliaryZooKeeper(const String & name) const
name);
zookeeper = shared->auxiliary_zookeepers.emplace(name,
std::make_shared<zkutil::ZooKeeper>(config, "auxiliary_zookeepers." + name, getZooKeeperLog())).first;
zkutil::ZooKeeper::create(config, "auxiliary_zookeepers." + name, getZooKeeperLog())).first;
}
else if (zookeeper->second->expired())
zookeeper->second = zookeeper->second->startNewSession();
@ -3397,7 +3401,7 @@ static void reloadZooKeeperIfChangedImpl(
if (zk)
zk->finalize("Config changed");
zk = std::make_shared<zkutil::ZooKeeper>(*config, config_name, std::move(zk_log));
zk = zkutil::ZooKeeper::create(*config, config_name, std::move(zk_log));
if (server_started)
zk->setServerCompletelyStarted();
}
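
The reason query_access_info becomes a shared_ptr (see the Context.h hunk below) is that a context created for a view, e.g. a definer's context, can be pointed back at its parent's access-info object and keep reporting tables and columns into it, while ordinary copies still get their own deep copy. A rough stand-alone illustration with simplified stand-ins (MiniContext and this QueryAccessInfo are not the real classes):

#include <iostream>
#include <memory>
#include <mutex>
#include <set>
#include <string>

struct QueryAccessInfo
{
    QueryAccessInfo() = default;
    QueryAccessInfo(const QueryAccessInfo & other)
    {
        std::lock_guard lock(other.mutex);
        tables = other.tables;
    }

    mutable std::mutex mutex;
    std::set<std::string> tables;
};
using QueryAccessInfoPtr = std::shared_ptr<QueryAccessInfo>;

struct MiniContext
{
    QueryAccessInfoPtr query_access_info = std::make_shared<QueryAccessInfo>();

    /// Copies get their own access info by default, as Context::createCopy does above...
    static MiniContext createCopy(const MiniContext & other)
    {
        MiniContext copy;
        copy.query_access_info = std::make_shared<QueryAccessInfo>(*other.query_access_info);
        return copy;
    }

    /// ...but a child built for a view can be redirected to the parent's object,
    /// which is what setQueryAccessInfo(getQueryAccessInfoPtr()) enables.
    void setQueryAccessInfo(QueryAccessInfoPtr other) { query_access_info = std::move(other); }

    void addTable(const std::string & name)
    {
        std::lock_guard lock(query_access_info->mutex);
        query_access_info->tables.insert(name);
    }
};

int main()
{
    MiniContext query_context;
    MiniContext select_context = MiniContext::createCopy(query_context);

    select_context.addTable("db.private_copy");  /// goes into the child's own copy

    select_context.setQueryAccessInfo(query_context.query_access_info);
    select_context.addTable("db.inner_table");   /// now recorded in the parent as well

    std::cout << query_context.query_access_info->tables.count("db.private_copy") << ' '
              << query_context.query_access_info->tables.count("db.inner_table") << '\n'; /// prints "0 1"
}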

View File

@ -350,8 +350,11 @@ protected:
std::set<std::string> projections{};
std::set<std::string> views{};
};
using QueryAccessInfoPtr = std::shared_ptr<QueryAccessInfo>;
QueryAccessInfo query_access_info;
/// In some situations, we want to be able to transfer the access info from children back to parents (e.g. the definer's context).
/// Therefore, query_access_info must be a pointer.
QueryAccessInfoPtr query_access_info;
/// Record names of created objects of factories (for testing, etc)
struct QueryFactoriesInfo
@ -676,7 +679,9 @@ public:
const Block * tryGetSpecialScalar(const String & name) const;
void addSpecialScalar(const String & name, const Block & block);
const QueryAccessInfo & getQueryAccessInfo() const { return query_access_info; }
const QueryAccessInfo & getQueryAccessInfo() const { return *getQueryAccessInfoPtr(); }
const QueryAccessInfoPtr getQueryAccessInfoPtr() const { return query_access_info; }
void setQueryAccessInfo(QueryAccessInfoPtr other) { query_access_info = other; }
void addQueryAccessInfo(
const String & quoted_database_name,

View File

@ -555,7 +555,7 @@ void ZooKeeperMetadataTransaction::commit()
if (state != CREATED)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect state ({}), it's a bug", state);
state = FAILED;
current_zookeeper->multi(ops);
current_zookeeper->multi(ops, /* check_session_valid */ true);
state = COMMITTED;
}

View File

@ -8,6 +8,7 @@
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/MutationsInterpreter.h>
#include <Interpreters/MutationsNonDeterministicHelpers.h>
#include <Interpreters/QueryLog.h>
@ -71,6 +72,13 @@ BlockIO InterpreterAlterQuery::execute()
BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
{
for (auto & child : alter.command_list->children)
{
auto * command_ast = child->as<ASTAlterCommand>();
if (command_ast->sql_security)
InterpreterCreateQuery::processSQLSecurityOption(getContext(), command_ast->sql_security->as<ASTSQLSecurity &>());
}
BlockIO res;
if (!UserDefinedSQLFunctionFactory::instance().empty())
@ -487,6 +495,11 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS
required_access.emplace_back(AccessType::ALTER_MODIFY_COMMENT, database, table);
break;
}
case ASTAlterCommand::MODIFY_SQL_SECURITY:
{
required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_SQL_SECURITY, database, table);
break;
}
}
return required_access;

View File

@ -2,6 +2,9 @@
#include <filesystem>
#include <Access/AccessControl.h>
#include <Access/User.h>
#include "Common/Exception.h"
#include <Common/StringUtils/StringUtils.h>
#include <Common/escapeForFileName.h>
@ -1094,6 +1097,8 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
String current_database = getContext()->getCurrentDatabase();
auto database_name = create.database ? create.getDatabase() : current_database;
if (create.sql_security)
processSQLSecurityOption(getContext(), create.sql_security->as<ASTSQLSecurity &>(), create.attach, create.is_materialized_view);
DDLGuardPtr ddl_guard;
@ -1880,6 +1885,61 @@ void InterpreterCreateQuery::addColumnsDescriptionToCreateQueryIfNecessary(ASTCr
}
}
void InterpreterCreateQuery::processSQLSecurityOption(ContextPtr context_, ASTSQLSecurity & sql_security, bool is_attach, bool is_materialized_view)
{
/// If no SQL security is specified, apply default from default_*_view_sql_security setting.
if (!sql_security.type.has_value())
{
SQLSecurityType default_security;
if (is_materialized_view)
default_security = context_->getSettingsRef().default_materialized_view_sql_security;
else
default_security = context_->getSettingsRef().default_normal_view_sql_security;
if (default_security == SQLSecurityType::DEFINER)
{
String default_definer = context_->getSettingsRef().default_view_definer;
if (default_definer == "CURRENT_USER")
sql_security.is_definer_current_user = true;
else
sql_security.definer = std::make_shared<ASTUserNameWithHost>(default_definer);
}
sql_security.type = default_security;
}
/// Resolves `DEFINER = CURRENT_USER`. Can change the SQL security type if we try to resolve the user during the attachment.
const auto current_user_name = context_->getUserName();
if (sql_security.is_definer_current_user)
{
if (current_user_name.empty())
/// This can happen only when attaching a view for the first time after migration and with `CURRENT_USER` default.
if (is_materialized_view)
sql_security.type = SQLSecurityType::NONE;
else
sql_security.type = SQLSecurityType::INVOKER;
else if (sql_security.definer)
sql_security.definer->replace(current_user_name);
else
sql_security.definer = std::make_shared<ASTUserNameWithHost>(current_user_name);
}
/// Checks the permissions for the specified definer user.
if (sql_security.definer && !sql_security.is_definer_current_user && !is_attach)
{
const auto definer_name = sql_security.definer->toString();
/// Validate that the user exists.
context_->getAccessControl().getID<User>(definer_name);
if (definer_name != current_user_name)
context_->checkAccess(AccessType::SET_DEFINER, definer_name);
}
if (sql_security.type == SQLSecurityType::NONE && !is_attach)
context_->checkAccess(AccessType::ALLOW_SQL_SECURITY_NONE);
}
void registerInterpreterCreateQuery(InterpreterFactory & factory)
{
auto create_fn = [] (const InterpreterFactory::Arguments & args)
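
Stripped of the access checks, processSQLSecurityOption above is a small decision procedure: take the default SQL SECURITY type from default_materialized_view_sql_security or default_normal_view_sql_security when the statement has none, honour default_view_definer, and degrade to NONE/INVOKER when a view is attached without a current user. A compressed, self-contained sketch of just that defaulting logic; resolveSqlSecurity, MiniSecurity and the hard-coded defaults in main() are assumptions for illustration, and the SET_DEFINER / ALLOW_SQL_SECURITY_NONE permission checks are deliberately omitted:

#include <iostream>
#include <optional>
#include <string>

enum class SQLSecurityType { DEFINER, INVOKER, NONE };

struct MiniSecurity
{
    std::optional<SQLSecurityType> type;
    std::optional<std::string> definer;
    bool is_definer_current_user = false;
};

/// Mirrors the shape of processSQLSecurityOption: apply defaults, then resolve CURRENT_USER.
void resolveSqlSecurity(MiniSecurity & s, const std::string & current_user, bool is_materialized_view,
                        SQLSecurityType default_mv, SQLSecurityType default_view, const std::string & default_definer)
{
    if (!s.type)
    {
        SQLSecurityType def = is_materialized_view ? default_mv : default_view;
        if (def == SQLSecurityType::DEFINER)
        {
            if (default_definer == "CURRENT_USER")
                s.is_definer_current_user = true;
            else
                s.definer = default_definer;
        }
        s.type = def;
    }

    if (s.is_definer_current_user)
    {
        if (current_user.empty())
            /// e.g. attaching old metadata at server startup: there is no user to resolve
            s.type = is_materialized_view ? SQLSecurityType::NONE : SQLSecurityType::INVOKER;
        else
            s.definer = current_user;
    }
}

int main()
{
    MiniSecurity mv; /// CREATE MATERIALIZED VIEW without an explicit SQL SECURITY clause
    resolveSqlSecurity(mv, "alice", /*is_materialized_view=*/ true,
                       SQLSecurityType::DEFINER, SQLSecurityType::INVOKER, "CURRENT_USER");
    std::cout << (mv.type == SQLSecurityType::DEFINER) << ' ' << mv.definer.value_or("<none>") << '\n'; /// prints "1 alice"
}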

View File

@ -80,6 +80,9 @@ public:
void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr) const override;
/// Check access rights, validate the definer statement and replace `CURRENT_USER` with the actual user name.
static void processSQLSecurityOption(ContextPtr context_, ASTSQLSecurity & sql_security, bool is_attach = false, bool is_materialized_view = false);
private:
struct TableProperties
{

View File

@ -125,7 +125,10 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
Block InterpreterInsertQuery::getSampleBlock(
const ASTInsertQuery & query,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot) const
const StorageMetadataPtr & metadata_snapshot,
ContextPtr context_,
bool no_destination,
bool allow_materialized)
{
/// If the query does not include information about columns
if (!query.columns)
@ -139,7 +142,7 @@ Block InterpreterInsertQuery::getSampleBlock(
}
/// Form the block based on the column names from the query
const auto columns_ast = processColumnTransformers(getContext()->getCurrentDatabase(), table, metadata_snapshot, query.columns);
const auto columns_ast = processColumnTransformers(context_->getCurrentDatabase(), table, metadata_snapshot, query.columns);
Names names;
names.reserve(columns_ast->children.size());
for (const auto & identifier : columns_ast->children)
@ -148,7 +151,7 @@ Block InterpreterInsertQuery::getSampleBlock(
names.emplace_back(std::move(current_name));
}
return getSampleBlock(names, table, metadata_snapshot);
return getSampleBlock(names, table, metadata_snapshot, allow_materialized);
}
std::optional<Names> InterpreterInsertQuery::getInsertColumnNames() const
@ -173,7 +176,8 @@ std::optional<Names> InterpreterInsertQuery::getInsertColumnNames() const
Block InterpreterInsertQuery::getSampleBlock(
const Names & names,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot) const
const StorageMetadataPtr & metadata_snapshot,
bool allow_materialized)
{
Block table_sample_physical = metadata_snapshot->getSampleBlock();
Block table_sample_insertable = metadata_snapshot->getSampleBlockInsertable();
@ -260,7 +264,8 @@ Chain InterpreterInsertQuery::buildChain(
const StorageMetadataPtr & metadata_snapshot,
const Names & columns,
ThreadStatusesHolderPtr thread_status_holder,
std::atomic_uint64_t * elapsed_counter_ms)
std::atomic_uint64_t * elapsed_counter_ms,
bool check_access)
{
ProfileEvents::increment(ProfileEvents::InsertQueriesWithSubqueries);
ProfileEvents::increment(ProfileEvents::QueriesWithSubqueries);
@ -271,7 +276,9 @@ Chain InterpreterInsertQuery::buildChain(
if (!running_group)
running_group = std::make_shared<ThreadGroup>(getContext());
auto sample = getSampleBlock(columns, table, metadata_snapshot);
auto sample = getSampleBlock(columns, table, metadata_snapshot, allow_materialized);
if (check_access)
getContext()->checkAccess(AccessType::INSERT, table->getStorageID(), sample.getNames());
Chain sink = buildSink(table, metadata_snapshot, thread_status_holder, running_group, elapsed_counter_ms);
Chain chain = buildPreSinkChain(sink.getInputHeader(), table, metadata_snapshot, sample);
@ -397,7 +404,7 @@ BlockIO InterpreterInsertQuery::execute()
auto table_lock = table->lockForShare(getContext()->getInitialQueryId(), settings.lock_acquire_timeout);
auto metadata_snapshot = table->getInMemoryMetadataPtr();
auto query_sample_block = getSampleBlock(query, table, metadata_snapshot);
auto query_sample_block = getSampleBlock(query, table, metadata_snapshot, getContext(), no_destination, allow_materialized);
/// For table functions we check access while executing
/// getTable() -> ITableFunction::execute().
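
Making getSampleBlock static and context-aware lets callers such as buildChain derive the exact set of inserted columns first and only then, when check_access is set, verify INSERT access on those very names. A hedged, library-free sketch of that order of operations, using plain strings instead of Block and the metadata snapshot (MiniMetadata, sampleColumns and this buildChain are illustrative stand-ins):

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

/// Stand-in for the metadata snapshot: which columns can be inserted into.
struct MiniMetadata
{
    std::set<std::string> insertable_columns;
};

/// Stand-in for getSampleBlock(names, ...): validate the requested columns.
std::vector<std::string> sampleColumns(const std::vector<std::string> & requested, const MiniMetadata & metadata)
{
    std::vector<std::string> result;
    for (const auto & name : requested)
    {
        if (!metadata.insertable_columns.count(name))
            throw std::runtime_error("No such insertable column in table: " + name);
        result.push_back(name);
    }
    return result;
}

/// Stand-in for buildChain(..., check_access = true): check INSERT on exactly
/// the columns that will be written, after the sample block has been resolved.
void buildChain(const std::vector<std::string> & requested, const MiniMetadata & metadata,
                const std::set<std::string> & granted_insert_columns, bool check_access)
{
    auto sample = sampleColumns(requested, metadata);
    if (check_access)
        for (const auto & name : sample)
            if (!granted_insert_columns.count(name))
                throw std::runtime_error("Not enough privileges to INSERT into column " + name);
    /// ... the real code would now build the sink chain for `sample` ...
}

int main()
{
    MiniMetadata metadata{{"id", "payload", "created_at"}};
    std::set<std::string> granted{"id", "payload"};

    buildChain({"id", "payload"}, metadata, granted, /*check_access=*/ true); /// passes
    try
    {
        buildChain({"id", "created_at"}, metadata, granted, /*check_access=*/ true);
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
}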

View File

@ -46,14 +46,21 @@ public:
const StorageMetadataPtr & metadata_snapshot,
const Names & columns,
ThreadStatusesHolderPtr thread_status_holder = {},
std::atomic_uint64_t * elapsed_counter_ms = nullptr);
std::atomic_uint64_t * elapsed_counter_ms = nullptr,
bool check_access = false);
static void extendQueryLogElemImpl(QueryLogElement & elem, ContextPtr context_);
void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr context_) const override;
StoragePtr getTable(ASTInsertQuery & query);
Block getSampleBlock(const ASTInsertQuery & query, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot) const;
static Block getSampleBlock(
const ASTInsertQuery & query,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot,
ContextPtr context_,
bool no_destination = false,
bool allow_materialized = false);
bool supportsTransactions() const override { return true; }
@ -62,7 +69,7 @@ public:
bool shouldAddSquashingFroStorage(const StoragePtr & table) const;
private:
Block getSampleBlock(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot) const;
static Block getSampleBlock(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot, bool allow_materialized);
ASTPtr query_ptr;
const bool allow_materialized;

View File

@ -277,9 +277,11 @@ BlockIO InterpreterKillQueryQuery::execute()
code = CancellationCode::NotFound;
else
{
ParserAlterCommand parser;
const auto alter_command = command_col.getDataAt(i).toString();
const auto with_round_bracket = alter_command.front() == '(';
ParserAlterCommand parser{with_round_bracket};
auto command_ast
= parseQuery(parser, command_col.getDataAt(i).toString(), 0, getContext()->getSettingsRef().max_parser_depth);
= parseQuery(parser, alter_command, 0, getContext()->getSettingsRef().max_parser_depth);
required_access_rights = InterpreterAlterQuery::getRequiredAccessForCommand(
command_ast->as<const ASTAlterCommand &>(), table_id.database_name, table_id.table_name);
if (!access->isGranted(required_access_rights))

View File

@ -830,7 +830,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
if (query.prewhere() && !query.where())
analysis_result.prewhere_info->need_filter = true;
if (table_id && got_storage_from_query && !joined_tables.isLeftTableFunction())
if (table_id && got_storage_from_query && !joined_tables.isLeftTableFunction() && !options.ignore_access_check)
{
/// The current user should have the SELECT privilege. If this table_id is for a table
/// function we don't check access rights here because in this case they have been already

View File

@ -46,6 +46,10 @@ struct SelectQueryOptions
/// Bypass setting constraints for some internal queries such as projection ASTs.
bool ignore_setting_constraints = false;
/// Bypass access check for select query.
/// This allows skipping the double access check in some specific cases (e.g. INSERT into a table with a materialized view).
bool ignore_access_check = false;
/// These two fields are used to evaluate shardNum() and shardCount() function when
/// prefer_localhost_replica == 1 and local instance is selected. They are needed because local
/// instance might have multiple shards and scalars can only hold one value.
@ -129,6 +133,12 @@ struct SelectQueryOptions
return *this;
}
SelectQueryOptions & ignoreAccessCheck(bool value = true)
{
ignore_access_check = value;
return *this;
}
SelectQueryOptions & setInternal(bool value = false)
{
is_internal = value;
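
ignore_access_check follows the same fluent-setter convention as the rest of SelectQueryOptions, so a call site that has already checked INSERT access (the materialized-view push described in the comment above) can simply chain it. A toy version of that builder pattern; MiniOptions is illustrative and not the real struct:

#include <iostream>

struct MiniOptions
{
    bool ignore_setting_constraints = false;
    bool ignore_access_check = false;

    MiniOptions & ignoreSettingConstraints(bool value = true)
    {
        ignore_setting_constraints = value;
        return *this;
    }

    MiniOptions & ignoreAccessCheck(bool value = true)
    {
        ignore_access_check = value;
        return *this;
    }
};

int main()
{
    /// e.g. the inner SELECT of a materialized-view insert, where access was checked on the outer query
    auto options = MiniOptions{}.ignoreSettingConstraints().ignoreAccessCheck();
    std::cout << options.ignore_access_check << '\n'; /// prints "1"
}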

View File

@ -407,8 +407,13 @@ CSN TransactionLog::commitTransaction(const MergeTreeTransactionPtr & txn, bool
{
Coordination::SimpleFaultInjection fault(fault_probability_before_commit, fault_probability_after_commit, "commit");
Coordination::Requests requests;
requests.push_back(zkutil::makeCreateRequest(zookeeper_path_log + "/csn-", serializeTID(txn->tid), zkutil::CreateMode::PersistentSequential));
/// Commit point
csn_path_created = current_zookeeper->create(zookeeper_path_log + "/csn-", serializeTID(txn->tid), zkutil::CreateMode::PersistentSequential);
auto res = current_zookeeper->multi(requests, /* check_session_valid */ true);
csn_path_created = dynamic_cast<const Coordination::CreateResponse *>(res.back().get())->path_created;
}
catch (const Coordination::Exception & e)
{
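
The commit point is now the create request executed inside a single multi() call that also validates the session, and the sequential CSN path is read back from the matching CreateResponse instead of a separate create(). A very rough, ZooKeeper-free sketch of pulling the created path out of a batched response; MiniResponse, MiniCreateResponse and this multi() are invented stand-ins, not the Coordination types:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct MiniResponse { virtual ~MiniResponse() = default; };
struct MiniCreateResponse : MiniResponse { std::string path_created; };

/// Pretend server: executes one create of a persistent sequential node per path.
std::vector<std::shared_ptr<MiniResponse>> multi(const std::vector<std::string> & create_paths)
{
    std::vector<std::shared_ptr<MiniResponse>> responses;
    for (const auto & path : create_paths)
    {
        auto response = std::make_shared<MiniCreateResponse>();
        response->path_created = path + "0000000042"; /// sequential suffix assigned by the server
        responses.push_back(response);
    }
    return responses;
}

int main()
{
    std::vector<std::string> requests{"/clickhouse/txn/log/csn-"};
    auto responses = multi(requests);

    /// As in commitTransaction above: the created path is taken from the last response.
    auto created = std::dynamic_pointer_cast<MiniCreateResponse>(responses.back());
    std::cout << created->path_created << '\n'; /// prints "/clickhouse/txn/log/csn-0000000042"
}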

View File

@ -1,6 +1,7 @@
#include <iomanip>
#include <IO/Operators.h>
#include <Parsers/ASTAlterQuery.h>
#include <Core/ServerSettings.h>
#include <IO/Operators.h>
#include <Common/quoteString.h>
@ -69,6 +70,9 @@ ASTPtr ASTAlterCommand::clone() const
void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
if (format_alter_commands_with_parentheses)
settings.ostr << "(";
if (type == ASTAlterCommand::ADD_COLUMN)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "ADD COLUMN " << (if_not_exists ? "IF NOT EXISTS " : "")
@ -469,6 +473,11 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState &
settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO ";
rename_to->formatImpl(settings, state, frame);
}
else if (type == ASTAlterCommand::MODIFY_SQL_SECURITY)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "MODIFY " << (settings.hilite ? hilite_none : "");
sql_security->formatImpl(settings, state, frame);
}
else if (type == ASTAlterCommand::APPLY_DELETED_MASK)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "APPLY DELETED MASK" << (settings.hilite ? hilite_none : "");
@ -481,6 +490,9 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState &
}
else
throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected type of ALTER");
if (format_alter_commands_with_parentheses)
settings.ostr << ")";
}
void ASTAlterCommand::forEachPointerToChild(std::function<void(void**)> f)
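
Wrapping each command in parentheses makes a formatted multi-command ALTER unambiguous to re-parse, which is what the InterpreterKillQueryQuery hunk above relies on when it checks for a leading '(' in the stored command text. A small stand-alone sketch of the output difference; formatCommands is a hypothetical helper, and the TTL command is only an example of a command body that itself contains commas:

#include <iostream>
#include <string>
#include <vector>

/// Joins already-formatted ALTER commands, optionally wrapping each in
/// parentheses the way ASTAlterCommand::formatImpl does when enabled.
std::string formatCommands(const std::vector<std::string> & commands, bool with_parentheses)
{
    std::string result;
    for (size_t i = 0; i < commands.size(); ++i)
    {
        if (i)
            result += ", ";
        result += with_parentheses ? "(" + commands[i] + ")" : commands[i];
    }
    return result;
}

int main()
{
    std::vector<std::string> commands{
        "MODIFY TTL d + INTERVAL 1 MONTH DELETE, d + INTERVAL 1 WEEK TO VOLUME 'cold'",
        "DROP COLUMN y"};

    /// Without parentheses the commas inside the TTL list look like command separators.
    std::cout << "ALTER TABLE t " << formatCommands(commands, false) << '\n';
    std::cout << "ALTER TABLE t " << formatCommands(commands, true) << '\n';
}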

View File

@ -83,6 +83,7 @@ public:
MODIFY_DATABASE_SETTING,
MODIFY_COMMENT,
MODIFY_SQL_SECURITY,
};
Type type = NO_TYPE;
@ -165,6 +166,9 @@ public:
/// For MODIFY_QUERY
IAST * select = nullptr;
/// For MODIFY_SQL_SECURITY
IAST * sql_security = nullptr;
/// In ALTER CHANNEL, ADD, DROP, SUSPEND, RESUME, REFRESH, MODIFY queries, the list of live views is stored here
IAST * values = nullptr;
@ -222,10 +226,16 @@ public:
ASTPtr clone() const override;
// This function is only meant to be called during application startup
// For reasons see https://github.com/ClickHouse/ClickHouse/pull/59532
static void setFormatAlterCommandsWithParentheses(bool value) { format_alter_commands_with_parentheses = value; }
protected:
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
void forEachPointerToChild(std::function<void(void**)> f) override;
static inline bool format_alter_commands_with_parentheses = false;
};
class ASTAlterQuery : public ASTQueryWithTableAndOutput, public ASTQueryWithOnCluster

View File

@ -12,6 +12,37 @@
namespace DB
{
void ASTSQLSecurity::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
if (!type.has_value())
return;
if (definer || is_definer_current_user)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "DEFINER" << (settings.hilite ? hilite_none : "");
settings.ostr << " = ";
if (definer)
definer->formatImpl(settings, state, frame);
else
settings.ostr << "CURRENT_USER";
settings.ostr << " ";
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SQL SECURITY" << (settings.hilite ? hilite_none : "");
switch (*type)
{
case SQLSecurityType::INVOKER:
settings.ostr << " INVOKER";
break;
case SQLSecurityType::DEFINER:
settings.ostr << " DEFINER";
break;
case SQLSecurityType::NONE:
settings.ostr << " NONE";
break;
}
}
ASTPtr ASTStorage::clone() const
{
auto res = std::make_shared<ASTStorage>(*this);
@ -292,10 +323,9 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
else if (is_window_view)
what = "WINDOW VIEW";
settings.ostr
<< (settings.hilite ? hilite_keyword : "")
<< action << " "
<< (temporary ? "TEMPORARY " : "")
settings.ostr << (settings.hilite ? hilite_keyword : "") << action << (settings.hilite ? hilite_none : "");
settings.ostr << " ";
settings.ostr << (settings.hilite ? hilite_keyword : "") << (temporary ? "TEMPORARY " : "")
<< what << " "
<< (if_not_exists ? "IF NOT EXISTS " : "")
<< (settings.hilite ? hilite_none : "")
@ -444,10 +474,16 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
else if (is_create_empty)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " EMPTY" << (settings.hilite ? hilite_none : "");
if (sql_security && sql_security->as<ASTSQLSecurity &>().type.has_value())
{
settings.ostr << settings.nl_or_ws;
sql_security->formatImpl(settings, state, frame);
}
if (select)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS"
<< settings.nl_or_ws
settings.ostr << settings.nl_or_ws;
settings.ostr << (settings.hilite ? hilite_keyword : "") << "AS "
<< (comment ? "(" : "") << (settings.hilite ? hilite_none : "");
select->formatImpl(settings, state, frame);
settings.ostr << (settings.hilite ? hilite_keyword : "") << (comment ? ")" : "") << (settings.hilite ? hilite_none : "");

View File

@ -5,6 +5,7 @@
#include <Parsers/ASTDictionary.h>
#include <Parsers/ASTDictionaryAttributeDeclaration.h>
#include <Parsers/ASTTableOverrides.h>
#include <Parsers/ASTSQLSecurity.h>
#include <Parsers/ASTRefreshStrategy.h>
#include <Interpreters/StorageID.h>
@ -15,6 +16,7 @@ class ASTFunction;
class ASTSetQuery;
class ASTSelectWithUnionQuery;
class ASTStorage : public IAST
{
public:
@ -111,6 +113,7 @@ public:
IAST * as_table_function = nullptr;
ASTSelectWithUnionQuery * select = nullptr;
IAST * comment = nullptr;
ASTPtr sql_security = nullptr;
ASTTableOverrideList * table_overrides = nullptr; /// For CREATE DATABASE with engines that automatically create tables

View File

@ -380,7 +380,7 @@ namespace
findMySQLFunctionSecretArguments();
}
else if ((engine_name == "S3") || (engine_name == "COSN") || (engine_name == "OSS") ||
(engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg"))
(engine_name == "DeltaLake") || (engine_name == "Hudi") || (engine_name == "Iceberg") || (engine_name == "S3Queue"))
{
/// S3('url', ['aws_access_key_id', 'aws_secret_access_key',] ...)
findS3TableEngineSecretArguments();

View File

@ -0,0 +1,39 @@
#include <Parsers/ASTSQLSecurity.h>
#include <IO/Operators.h>
namespace DB
{
void ASTSQLSecurity::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
if (!type.has_value())
return;
if (definer || is_definer_current_user)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "DEFINER" << (settings.hilite ? hilite_none : "");
settings.ostr << " = ";
if (definer)
definer->formatImpl(settings, state, frame);
else
settings.ostr << "CURRENT_USER";
settings.ostr << " ";
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SQL SECURITY" << (settings.hilite ? hilite_none : "");
switch (*type)
{
case SQLSecurityType::INVOKER:
settings.ostr << " INVOKER";
break;
case SQLSecurityType::DEFINER:
settings.ostr << " DEFINER";
break;
case SQLSecurityType::NONE:
settings.ostr << " NONE";
break;
}
}
}

View File

@ -0,0 +1,26 @@
#pragma once
#include <Parsers/Access/ASTUserNameWithHost.h>
#include <Access/Common/SQLSecurityDefs.h>
namespace DB
{
/// DEFINER = <user_name | CURRENT_USER> SQL SECURITY <DEFINER | INVOKER | NONE>
/// If type was not set during parsing, the default type from settings will be used.
/// Currently supports only views.
class ASTSQLSecurity : public IAST
{
public:
bool is_definer_current_user{false};
std::shared_ptr<ASTUserNameWithHost> definer = nullptr;
std::optional<SQLSecurityType> type = std::nullopt;
String getID(char) const override { return "View SQL Security"; }
ASTPtr clone() const override { return std::make_shared<ASTSQLSecurity>(*this); }
void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;
};
}

View File

@ -28,6 +28,12 @@ void ASTUserNameWithHost::concatParts()
host_pattern.clear();
}
void ASTUserNameWithHost::replace(const String name_)
{
base_name = name_;
host_pattern.clear();
}
void ASTUserNamesWithHost::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
{

View File

@ -27,6 +27,7 @@ public:
String getID(char) const override { return "UserNameWithHost"; }
ASTPtr clone() const override { return std::make_shared<ASTUserNameWithHost>(*this); }
void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
void replace(const String name_);
};

View File

@ -40,6 +40,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_modify_setting("MODIFY SETTING");
ParserKeyword s_reset_setting("RESET SETTING");
ParserKeyword s_modify_query("MODIFY QUERY");
ParserKeyword s_modify_sql_security("MODIFY SQL SECURITY");
ParserKeyword s_modify_refresh("MODIFY REFRESH");
ParserKeyword s_add_index("ADD INDEX");
@ -117,6 +118,9 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_remove_sample_by("REMOVE SAMPLE BY");
ParserKeyword s_apply_deleted_mask("APPLY DELETED MASK");
ParserToken parser_opening_round_bracket(TokenType::OpeningRoundBracket);
ParserToken parser_closing_round_bracket(TokenType::ClosingRoundBracket);
ParserCompoundIdentifier parser_name;
ParserStringLiteral parser_string_literal;
ParserStringAndSubstitution parser_string_and_substituion;
@ -138,6 +142,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
/* allow_empty = */ false);
ParserNameList values_p;
ParserSelectWithUnionQuery select_p;
ParserSQLSecurity sql_security_p;
ParserRefreshStrategy refresh_p;
ParserTTLExpressionList parser_ttl_list;
@ -162,6 +167,13 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ASTPtr command_select;
ASTPtr command_values;
ASTPtr command_rename_to;
ASTPtr command_sql_security;
if (with_round_bracket)
{
if (!parser_opening_round_bracket.ignore(pos, expected))
return false;
}
switch (alter_object)
{
@ -857,6 +869,14 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
return false;
command->type = ASTAlterCommand::MODIFY_QUERY;
}
else if (s_modify_sql_security.ignore(pos, expected))
{
/// This is a hack so we can reuse the parser from CREATE and don't have to write `MODIFY SQL SECURITY SQL SECURITY INVOKER`.
pos -= 2;
if (!sql_security_p.parse(pos, command_sql_security, expected))
return false;
command->type = ASTAlterCommand::MODIFY_SQL_SECURITY;
}
else if (s_modify_refresh.ignore(pos, expected))
{
if (!refresh_p.parse(pos, command->refresh, expected))
@ -885,6 +905,12 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
}
}
if (with_round_bracket)
{
if (!parser_closing_round_bracket.ignore(pos, expected))
return false;
}
if (command_col_decl)
command->col_decl = command->children.emplace_back(std::move(command_col_decl)).get();
if (command_column)
@ -925,6 +951,8 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->select = command->children.emplace_back(std::move(command_select)).get();
if (command_values)
command->values = command->children.emplace_back(std::move(command_values)).get();
if (command_sql_security)
command->sql_security = command->children.emplace_back(std::move(command_sql_security)).get();
if (command_rename_to)
command->rename_to = command->children.emplace_back(std::move(command_rename_to)).get();
@ -938,7 +966,10 @@ bool ParserAlterCommandList::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
node = command_list;
ParserToken s_comma(TokenType::Comma);
ParserAlterCommand p_command(alter_object);
const auto with_round_bracket = pos->type == TokenType::OpeningRoundBracket;
ParserAlterCommand p_command(with_round_bracket, alter_object);
do
{

View File

@ -61,10 +61,15 @@ protected:
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
public:
bool with_round_bracket;
ASTAlterQuery::AlterObjectType alter_object;
ParserAlterCommand(ASTAlterQuery::AlterObjectType alter_object_ = ASTAlterQuery::AlterObjectType::TABLE)
: alter_object(alter_object_) {}
explicit ParserAlterCommand(
bool with_round_bracket_, ASTAlterQuery::AlterObjectType alter_object_ = ASTAlterQuery::AlterObjectType::TABLE)
: with_round_bracket(with_round_bracket_), alter_object(alter_object_)
{
}
};

View File

@ -1,4 +1,5 @@
#include <IO/ReadHelpers.h>
#include <Parsers/Access/ParserUserNameWithHost.h>
#include <Parsers/ASTConstraintDeclaration.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTExpressionList.h>
@ -84,6 +85,65 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
return true;
}
bool ParserSQLSecurity::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ParserToken s_eq(TokenType::Equals);
ParserKeyword s_definer("DEFINER");
bool is_definer_current_user = false;
ASTPtr definer;
std::optional<SQLSecurityType> type;
while (true)
{
if (!definer && s_definer.ignore(pos, expected))
{
s_eq.ignore(pos, expected);
if (ParserKeyword{"CURRENT_USER"}.ignore(pos, expected))
is_definer_current_user = true;
else if (!ParserUserNameWithHost{}.parse(pos, definer, expected))
return false;
continue;
}
if (!type && ParserKeyword{"SQL SECURITY"}.ignore(pos, expected))
{
if (s_definer.ignore(pos, expected))
type = SQLSecurityType::DEFINER;
else if (ParserKeyword{"INVOKER"}.ignore(pos, expected))
type = SQLSecurityType::INVOKER;
else if (ParserKeyword{"NONE"}.ignore(pos, expected))
type = SQLSecurityType::NONE;
else
return false;
continue;
}
break;
}
if (!type)
{
if (is_definer_current_user || definer)
type = SQLSecurityType::DEFINER;
else
return false;
}
else if (type == SQLSecurityType::DEFINER && !definer)
is_definer_current_user = true;
auto result = std::make_shared<ASTSQLSecurity>();
result->is_definer_current_user = is_definer_current_user;
result->type = type;
if (definer)
result->definer = typeid_cast<std::shared_ptr<ASTUserNameWithHost>>(definer);
node = std::move(result);
return true;
}
bool ParserIdentifierWithParameters::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
@ -849,6 +909,7 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ParserStorage storage_inner{ParserStorage::TABLE_ENGINE};
ParserTablePropertiesDeclarationList table_properties_p;
ParserSelectWithUnionQuery select_p;
ParserSQLSecurity sql_security_p;
ASTPtr table;
ASTPtr to_table;
@ -857,6 +918,7 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ASTPtr as_table;
ASTPtr select;
ASTPtr live_view_periodic_refresh;
ASTPtr sql_security;
String cluster_str;
bool attach = false;
@ -873,6 +935,8 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
return false;
}
sql_security_p.parse(pos, sql_security, expected);
if (!s_live.ignore(pos, expected))
return false;
@ -925,6 +989,9 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
return false;
}
if (!sql_security && !sql_security_p.parse(pos, sql_security, expected))
sql_security = std::make_shared<ASTSQLSecurity>();
/// AS SELECT ...
if (!s_as.ignore(pos, expected))
return false;
@ -967,6 +1034,9 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
if (comment)
query->set(query->comment, comment);
if (sql_security)
query->sql_security = typeid_cast<std::shared_ptr<ASTSQLSecurity>>(sql_security);
return true;
}
@ -1384,6 +1454,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ParserTablePropertiesDeclarationList table_properties_p;
ParserSelectWithUnionQuery select_p;
ParserNameList names_p;
ParserSQLSecurity sql_security_p;
ASTPtr table;
ASTPtr to_table;
@ -1393,6 +1464,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ASTPtr as_database;
ASTPtr as_table;
ASTPtr select;
ASTPtr sql_security;
ASTPtr refresh_strategy;
String cluster_str;
@ -1418,6 +1490,8 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
replace_view = true;
}
sql_security_p.parse(pos, sql_security, expected);
if (!replace_view && s_materialized.ignore(pos, expected))
{
is_materialized_view = true;
@ -1510,6 +1584,9 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
}
}
if (!sql_security && !sql_security_p.parse(pos, sql_security, expected))
sql_security = std::make_shared<ASTSQLSecurity>();
/// AS SELECT ...
if (!s_as.ignore(pos, expected))
return false;
@ -1552,6 +1629,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->set(query->refresh_strategy, refresh_strategy);
if (comment)
query->set(query->comment, comment);
query->sql_security = typeid_cast<std::shared_ptr<ASTSQLSecurity>>(sql_security);
tryGetIdentifierNameInto(as_database, query->as_database);
tryGetIdentifierNameInto(as_table, query->as_table);
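
The defaulting rules at the end of ParserSQLSecurity::parseImpl are easy to miss: a DEFINER clause on its own implies SQL SECURITY DEFINER, and SQL SECURITY DEFINER without a DEFINER clause means the current user. A compact sketch of just that post-parse decision; finishSqlSecurity and ParsedSecurity are illustrative names, not the parser's API:

#include <iostream>
#include <optional>
#include <string>

enum class SQLSecurityType { DEFINER, INVOKER, NONE };

struct ParsedSecurity
{
    bool is_definer_current_user = false;
    std::optional<std::string> definer;
    std::optional<SQLSecurityType> type;
};

/// Applies the same fall-through rules as the parser once both optional
/// clauses (DEFINER = ... and SQL SECURITY ...) have been consumed.
bool finishSqlSecurity(ParsedSecurity & s)
{
    if (!s.type)
    {
        if (!s.is_definer_current_user && !s.definer)
            return false;                  /// neither clause present: not a SQL security spec at all
        s.type = SQLSecurityType::DEFINER; /// DEFINER = ... on its own implies SQL SECURITY DEFINER
    }
    else if (*s.type == SQLSecurityType::DEFINER && !s.definer)
        s.is_definer_current_user = true;  /// SQL SECURITY DEFINER without DEFINER = ... means CURRENT_USER
    return true;
}

int main()
{
    ParsedSecurity only_definer;
    only_definer.definer = "alice";
    std::cout << finishSqlSecurity(only_definer) << ' '
              << (only_definer.type == SQLSecurityType::DEFINER) << '\n'; /// prints "1 1"

    ParsedSecurity definer_type_only;
    definer_type_only.type = SQLSecurityType::DEFINER;
    finishSqlSecurity(definer_type_only);
    std::cout << definer_type_only.is_definer_current_user << '\n'; /// prints "1"
}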

View File

@ -25,6 +25,14 @@ protected:
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** Parses the SQL SECURITY option, e.g. DEFINER = user_name SQL SECURITY DEFINER
*/
class ParserSQLSecurity : public IParserBase
{
protected:
const char * getName() const override { return "sql security"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** Storage engine or Codec. For example:
* Memory()

View File

@ -62,6 +62,18 @@ public:
return *this;
}
ALWAYS_INLINE TokenIterator & operator-=(int value)
{
index -= value;
return *this;
}
ALWAYS_INLINE TokenIterator & operator+=(int value)
{
index += value;
return *this;
}
ALWAYS_INLINE bool operator<(const TokenIterator & rhs) const { return index < rhs.index; }
ALWAYS_INLINE bool operator<=(const TokenIterator & rhs) const { return index <= rhs.index; }
ALWAYS_INLINE bool operator==(const TokenIterator & rhs) const { return index == rhs.index; }

View File

@ -132,7 +132,7 @@ INSTANTIATE_TEST_SUITE_P(ParserOptimizeQuery, ParserTest,
INSTANTIATE_TEST_SUITE_P(ParserOptimizeQuery_FAIL, ParserTest,
::testing::Combine(
::testing::Values(std::make_shared<ParserAlterCommand>()),
::testing::Values(std::make_shared<ParserAlterCommand>(false)),
::testing::ValuesIn(std::initializer_list<ParserTestCase>
{
{
@ -159,7 +159,7 @@ INSTANTIATE_TEST_SUITE_P(ParserOptimizeQuery_FAIL, ParserTest,
INSTANTIATE_TEST_SUITE_P(ParserAlterCommand_MODIFY_COMMENT, ParserTest,
::testing::Combine(
::testing::Values(std::make_shared<ParserAlterCommand>()),
::testing::Values(std::make_shared<ParserAlterCommand>(false)),
::testing::ValuesIn(std::initializer_list<ParserTestCase>
{
{

View File

@ -188,6 +188,244 @@ private:
std::exception_ptr any_exception;
};
/// Generates one chain part for every view in buildPushingToViewsChain
std::optional<Chain> generateViewChain(
ContextPtr context,
const StorageID & view_id,
ThreadGroupPtr running_group,
Chain & result_chain,
ViewsDataPtr views_data,
ThreadStatusesHolderPtr thread_status_holder,
bool async_insert,
const Block & storage_header,
bool disable_deduplication_for_children)
{
auto view = DatabaseCatalog::instance().tryGetTable(view_id, context);
if (view == nullptr)
{
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
return std::nullopt;
}
auto view_metadata_snapshot = view->getInMemoryMetadataPtr();
auto select_context = view_metadata_snapshot->getSQLSecurityOverriddenContext(context);
select_context->setQueryAccessInfo(context->getQueryAccessInfoPtr());
auto insert_context = Context::createCopy(select_context);
const auto & insert_settings = insert_context->getSettingsRef();
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
{
insert_context->setSetting("insert_deduplicate", Field{false});
}
else if (insert_settings.update_insert_deduplication_token_in_dependent_materialized_views &&
!insert_settings.insert_deduplication_token.value.empty())
{
/** Update deduplication token passed to dependent MV with current view id. So it is possible to properly handle
* deduplication in complex INSERT flows.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1 ---> mv_2_1 ---> ds_2_1 ---> mv_3_1 ---> ds_3_1
* | |
* --> mv_1_2 ---> ds_1_2 ---> mv_2_2 --
*
* Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will
* be inserted into `ds_2_1`.
*
* We are forced to use view id instead of table id because there are some possible INSERT flows where no tables
* are involved.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1
* | |
* --> mv_1_2 --
*
*/
auto insert_deduplication_token = insert_settings.insert_deduplication_token.value;
if (view_id.hasUUID())
insert_deduplication_token += "_" + toString(view_id.uuid);
else
insert_deduplication_token += "_" + view_id.getFullNameNotQuoted();
insert_context->setSetting("insert_deduplication_token", insert_deduplication_token);
}
// Processing of blocks for MVs is done block by block, and there will
// be no parallel reading after (plus it is not a costless operation)
select_context->setSetting("parallelize_output_from_storages", Field{false});
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)
insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value);
if (insert_settings.min_insert_block_size_bytes_for_materialized_views)
insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value);
ASTPtr query;
Chain out;
/// We are creating a ThreadStatus per view to store its metrics individually
/// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls
/// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work,
/// and switch back to the original thread_status.
auto * original_thread = current_thread;
SCOPE_EXIT({ current_thread = original_thread; });
current_thread = nullptr;
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>(/*check_current_thread_on_destruction=*/ false);
/// Copy of a ThreadStatus should be internal.
view_thread_status_ptr->setInternalThread();
view_thread_status_ptr->attachToGroup(running_group);
auto * view_thread_status = view_thread_status_ptr.get();
views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr));
auto runtime_stats = std::make_unique<QueryViewsLogElement::ViewRuntimeStats>();
runtime_stats->target_name = view_id.getFullTableName();
runtime_stats->thread_status = view_thread_status;
runtime_stats->event_time = std::chrono::system_clock::now();
runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START;
auto & type = runtime_stats->type;
auto & target_name = runtime_stats->target_name;
auto * view_counter_ms = &runtime_stats->elapsed_ms;
if (auto * materialized_view = dynamic_cast<StorageMaterializedView *>(view.get()))
{
auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
if (lock == nullptr)
{
// In case the materialized view is dropped/detached at this point, we register a warning and ignore it
assert(materialized_view->is_dropped || materialized_view->is_detached);
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
return std::nullopt;
}
type = QueryViewsLogElement::ViewType::MATERIALIZED;
result_chain.addTableLock(lock);
StoragePtr inner_table = materialized_view->tryGetTargetTable();
/// If target table was dropped, ignore this materialized view.
if (!inner_table)
{
if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table)
return std::nullopt;
throw Exception(
ErrorCodes::UNKNOWN_TABLE,
"Target table '{}' of view '{}' doesn't exists. To ignore this view use setting "
"ignore_materialized_views_with_dropped_target_table",
materialized_view->getTargetTableId().getFullTableName(),
view_id.getFullTableName());
}
auto inner_table_id = inner_table->getStorageID();
auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr();
const auto & select_query = view_metadata_snapshot->getSelectQuery();
if (select_query.select_table_id != views_data->source_storage_id)
{
/// It may happen if the materialized view query was changed and it doesn't depend on this source table anymore.
/// See setting `allow_experimental_alter_materialized_view_structure`
LOG_DEBUG(
getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'",
select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), views_data->source_storage_id);
return std::nullopt;
}
query = select_query.inner_query;
target_name = inner_table_id.getFullTableName();
Block header;
/// Get list of columns we get from select query.
if (select_context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context);
else
header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock();
/// Insert only columns returned by select.
Names insert_columns;
const auto & inner_table_columns = inner_metadata_snapshot->getColumns();
for (const auto & column : header)
{
/// But skip columns which storage doesn't have.
if (inner_table_columns.hasNotAlias(column.name))
insert_columns.emplace_back(column.name);
}
InterpreterInsertQuery interpreter(nullptr, insert_context, false, false, false);
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms, !materialized_view->hasInnerTable());
if (interpreter.shouldAddSquashingFroStorage(inner_table))
{
bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
const auto & settings = insert_context->getSettingsRef();
out.addSource(std::make_shared<SquashingChunksTransform>(
out.getInputHeader(),
table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, insert_context->getQuota());
counting->setProcessListElement(insert_context->getProcessListElement());
counting->setProgressCallback(insert_context->getProgressCallback());
out.addSource(std::move(counting));
out.addStorageHolder(view);
out.addStorageHolder(inner_table);
}
else if (auto * live_view = dynamic_cast<StorageLiveView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::LIVE;
query = live_view->getInnerQuery();
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert, storage_header);
}
else if (auto * window_view = dynamic_cast<StorageWindowView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW;
query = window_view->getMergeableQuery();
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert);
}
else
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ false,
thread_status_holder, running_group, view_counter_ms, async_insert);
views_data->views.emplace_back(ViewRuntimeData{
std::move(query),
out.getInputHeader(),
view_id,
nullptr,
std::move(runtime_stats)});
if (type == QueryViewsLogElement::ViewType::MATERIALIZED)
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform>(
storage_header, views_data->views.back(), views_data);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
}
return out;
}
Chain buildPushingToViewsChain(
const StoragePtr & storage,
@ -232,259 +470,45 @@ Chain buildPushingToViewsChain(
auto table_id = storage->getStorageID();
auto views = DatabaseCatalog::instance().getDependentViews(table_id);
/// We need special context for materialized views insertions
ContextMutablePtr select_context;
ContextMutablePtr insert_context;
ViewsDataPtr views_data;
if (!views.empty())
{
select_context = Context::createCopy(context);
insert_context = Context::createCopy(context);
const auto & insert_settings = insert_context->getSettingsRef();
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
{
insert_context->setSetting("insert_deduplicate", Field{false});
}
// Processing of blocks for MVs is done block by block, and there will
// be no parallel reading after (plus it is not a costless operation)
select_context->setSetting("parallelize_output_from_storages", Field{false});
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)
insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value);
if (insert_settings.min_insert_block_size_bytes_for_materialized_views)
insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value);
views_data = std::make_shared<ViewsData>(thread_status_holder, select_context, table_id, metadata_snapshot, storage);
auto process_context = Context::createCopy(context); /// This context will be used in `process` function
views_data = std::make_shared<ViewsData>(thread_status_holder, process_context, table_id, metadata_snapshot, storage);
}
std::vector<Chain> chains;
for (const auto & view_id : views)
{
auto view = DatabaseCatalog::instance().tryGetTable(view_id, context);
if (view == nullptr)
try
{
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
continue;
}
auto out = generateViewChain(
context, view_id, running_group, result_chain,
views_data, thread_status_holder, async_insert, storage_header, disable_deduplication_for_children);
auto view_metadata_snapshot = view->getInMemoryMetadataPtr();
ASTPtr query;
Chain out;
/// We are creating a ThreadStatus per view to store its metrics individually
/// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls
/// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work,
/// and switch back to the original thread_status.
auto * original_thread = current_thread;
SCOPE_EXIT({ current_thread = original_thread; });
current_thread = nullptr;
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>(/*check_current_thread_on_destruction=*/ false);
/// Copy of a ThreadStatus should be internal.
view_thread_status_ptr->setInternalThread();
view_thread_status_ptr->attachToGroup(running_group);
auto * view_thread_status = view_thread_status_ptr.get();
views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr));
auto runtime_stats = std::make_unique<QueryViewsLogElement::ViewRuntimeStats>();
runtime_stats->target_name = view_id.getFullTableName();
runtime_stats->thread_status = view_thread_status;
runtime_stats->event_time = std::chrono::system_clock::now();
runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START;
auto & type = runtime_stats->type;
auto & target_name = runtime_stats->target_name;
auto * view_counter_ms = &runtime_stats->elapsed_ms;
const auto & insert_settings = insert_context->getSettingsRef();
ContextMutablePtr view_insert_context = insert_context;
if (!disable_deduplication_for_children &&
insert_settings.update_insert_deduplication_token_in_dependent_materialized_views &&
!insert_settings.insert_deduplication_token.value.empty())
{
/** Update deduplication token passed to dependent MV with current view id. So it is possible to properly handle
* deduplication in complex INSERT flows.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1 ---> mv_2_1 ---> ds_2_1 ---> mv_3_1 ---> ds_3_1
* | |
* --> mv_1_2 ---> ds_1_2 ---> mv_2_2 --
*
* Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will
* be inserted into `ds_2_1`.
*
* We are forced to use view id instead of table id because there are some possible INSERT flows where no tables
* are involved.
*
* Example:
*
             * landing --┬--> mv_1_1 --┬--> ds_1_1
             *           |             |
             *           └--> mv_1_2 --┘
*
*/
auto insert_deduplication_token = insert_settings.insert_deduplication_token.value;
if (view_id.hasUUID())
insert_deduplication_token += "_" + toString(view_id.uuid);
else
insert_deduplication_token += "_" + view_id.getFullNameNotQuoted();
view_insert_context = Context::createCopy(insert_context);
view_insert_context->setSetting("insert_deduplication_token", insert_deduplication_token);
}
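To make the suffixing rule above easier to follow, here is a minimal Python sketch of the same idea; the function name and string arguments are illustrative only, the real code derives them from the view's StorageID.

from typing import Optional

def per_view_dedup_token(base_token: str, view_uuid: Optional[str], view_full_name: str) -> str:
    # Mirrors the branch above: prefer the view UUID when the view has one,
    # otherwise fall back to the unquoted full name of the view.
    suffix = view_uuid if view_uuid is not None else view_full_name
    return f"{base_token}_{suffix}"

# Two sibling materialized views pushing into the same target table end up
# with distinct tokens, so their blocks are not deduplicated against each other.
assert per_view_dedup_token("token", None, "default.mv_2_1") == "token_default.mv_2_1"
assert per_view_dedup_token("token", None, "default.mv_2_2") == "token_default.mv_2_2"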
if (auto * materialized_view = dynamic_cast<StorageMaterializedView *>(view.get()))
{
auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
if (lock == nullptr)
{
// In case the materialized view is dropped/detached at this point, we register a warning and ignore it
assert(materialized_view->is_dropped || materialized_view->is_detached);
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
if (!out.has_value())
continue;
}
type = QueryViewsLogElement::ViewType::MATERIALIZED;
result_chain.addTableLock(lock);
chains.emplace_back(std::move(*out));
StoragePtr inner_table = materialized_view->tryGetTargetTable();
/// If target table was dropped, ignore this materialized view.
if (!inner_table)
/// Add the view to the query access info so it can appear in system.query_log
        /// hasQueryContext - for materialized tables with a background replication process, the query context is not added
if (!no_destination && context->hasQueryContext())
{
if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table)
continue;
context->getQueryContext()->addQueryAccessInfo(
backQuoteIfNeed(view_id.getDatabaseName()),
views_data->views.back().runtime_stats->target_name,
/*column_names=*/ {});
throw Exception(
ErrorCodes::UNKNOWN_TABLE,
"Target table '{}' of view '{}' doesn't exists. To ignore this view use setting "
"ignore_materialized_views_with_dropped_target_table",
materialized_view->getTargetTableId().getFullTableName(),
view_id.getFullTableName());
context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName());
}
auto inner_table_id = inner_table->getStorageID();
auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr();
const auto & select_query = view_metadata_snapshot->getSelectQuery();
if (select_query.select_table_id != table_id)
{
            /// It may happen if the materialized view query was changed and it doesn't depend on this source table anymore.
/// See setting `allow_experimental_alter_materialized_view_structure`
LOG_DEBUG(
getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'",
select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), table_id);
continue;
}
query = select_query.inner_query;
target_name = inner_table_id.getFullTableName();
Block header;
/// Get list of columns we get from select query.
if (select_context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context);
else
header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock();
/// Insert only columns returned by select.
Names insert_columns;
const auto & inner_table_columns = inner_metadata_snapshot->getColumns();
for (const auto & column : header)
{
/// But skip columns which storage doesn't have.
if (inner_table_columns.hasNotAlias(column.name))
insert_columns.emplace_back(column.name);
}
InterpreterInsertQuery interpreter(nullptr, view_insert_context, false, false, false);
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms);
if (interpreter.shouldAddSquashingFroStorage(inner_table))
{
bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
const auto & settings = view_insert_context->getSettingsRef();
out.addSource(std::make_shared<SquashingChunksTransform>(
out.getInputHeader(),
table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, view_insert_context->getQuota());
counting->setProcessListElement(view_insert_context->getProcessListElement());
counting->setProgressCallback(view_insert_context->getProgressCallback());
out.addSource(std::move(counting));
out.addStorageHolder(view);
out.addStorageHolder(inner_table);
}
else if (auto * live_view = dynamic_cast<StorageLiveView *>(view.get()))
catch (const Exception & e)
{
runtime_stats->type = QueryViewsLogElement::ViewType::LIVE;
query = live_view->getInnerQuery(); // Used only to log in system.query_views_log
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert, storage_header);
}
else if (auto * window_view = dynamic_cast<StorageWindowView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW;
query = window_view->getMergeableQuery(); // Used only to log in system.query_views_log
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert);
}
else
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ false,
thread_status_holder, running_group, view_counter_ms, async_insert);
views_data->views.emplace_back(ViewRuntimeData{
std::move(query),
out.getInputHeader(),
view_id,
nullptr,
std::move(runtime_stats)});
if (type == QueryViewsLogElement::ViewType::MATERIALIZED)
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform>(
storage_header, views_data->views.back(), views_data);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
}
chains.emplace_back(std::move(out));
/// Add the view to the query access info so it can appear in system.query_log
        /// hasQueryContext - for materialized tables with a background replication process, the query context is not added
if (!no_destination && context->hasQueryContext())
{
context->getQueryContext()->addQueryAccessInfo(
backQuoteIfNeed(view_id.getDatabaseName()),
views_data->views.back().runtime_stats->target_name,
/*column_names=*/ {});
context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName());
LOG_ERROR(&Poco::Logger::get("PushingToViews"), "Failed to push block to view {}, {}", view_id, e.message());
if (!context->getSettingsRef().materialized_views_ignore_errors)
throw;
}
}
@ -581,12 +605,12 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat
if (local_context->getSettingsRef().allow_experimental_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions());
InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck());
pipeline = interpreter.buildQueryPipeline();
}
else
{
InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions());
InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions().ignoreAccessCheck());
pipeline = interpreter.buildQueryPipeline();
}

View File

@ -884,6 +884,11 @@ void HTTPHandler::processQuery(
{
if (settings.http_write_exception_in_output_format && output_format.supportsWritingException())
{
bool with_stacktrace = (params.getParsed<bool>("stacktrace", false) && server.config().getBool("enable_http_stacktrace", true));
ExecutionStatus status = ExecutionStatus::fromCurrentException("", with_stacktrace);
formatExceptionForClient(status.code, request, response, used_output);
output_format.setException(getCurrentExceptionMessage(false));
output_format.finalize();
used_output.exception_is_written = true;
@ -916,31 +921,7 @@ void HTTPHandler::trySendExceptionToClient(
const std::string & s, int exception_code, HTTPServerRequest & request, HTTPServerResponse & response, Output & used_output)
try
{
if (used_output.out_holder)
used_output.out_holder->setExceptionCode(exception_code);
else
response.set("X-ClickHouse-Exception-Code", toString<int>(exception_code));
/// FIXME: make sure that no one else is reading from the same stream at the moment.
/// If HTTP method is POST and Keep-Alive is turned on, we should read the whole request body
/// to avoid reading part of the current request body in the next request.
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST
&& response.getKeepAlive()
&& exception_code != ErrorCodes::HTTP_LENGTH_REQUIRED
&& !request.getStream().eof())
{
request.getStream().ignoreAll();
}
if (exception_code == ErrorCodes::REQUIRED_PASSWORD)
{
response.requireAuthentication("ClickHouse server HTTP API");
}
else
{
response.setStatusAndReason(exceptionCodeToHTTPStatus(exception_code));
}
formatExceptionForClient(exception_code, request, response, used_output);
if (!used_output.out_holder && !used_output.exception_is_written)
{
@ -1001,6 +982,28 @@ catch (...)
}
}
void HTTPHandler::formatExceptionForClient(int exception_code, HTTPServerRequest & request, HTTPServerResponse & response, Output & used_output)
{
if (used_output.out_holder)
used_output.out_holder->setExceptionCode(exception_code);
else
response.set("X-ClickHouse-Exception-Code", toString<int>(exception_code));
/// FIXME: make sure that no one else is reading from the same stream at the moment.
/// If HTTP method is POST and Keep-Alive is turned on, we should read the whole request body
/// to avoid reading part of the current request body in the next request.
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST && response.getKeepAlive()
&& exception_code != ErrorCodes::HTTP_LENGTH_REQUIRED && !request.getStream().eof())
{
request.getStream().ignoreAll();
}
if (exception_code == ErrorCodes::REQUIRED_PASSWORD)
response.requireAuthentication("ClickHouse server HTTP API");
else
response.setStatusAndReason(exceptionCodeToHTTPStatus(exception_code));
}
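As a rough client-side illustration of the behaviour above (a sketch, not part of this change): a failing query sent over the HTTP interface should come back with the exception code reflected in the HTTP status and, when the output buffer was not used, in the X-ClickHouse-Exception-Code header. The server URL and the deliberately failing query are assumptions for a local default setup.

import requests

resp = requests.post("http://localhost:8123/", data="SELECT throwIf(1)")
# The handler maps the exception code to an HTTP status and, if the output
# holder was not in use, exposes the code via the header below.
print(resp.status_code)
print(resp.headers.get("X-ClickHouse-Exception-Code"))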
void HTTPHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event)
{

View File

@ -148,6 +148,12 @@ private:
HTTPServerResponse & response,
Output & used_output);
void formatExceptionForClient(
int exception_code,
HTTPServerRequest & request,
HTTPServerResponse & response,
Output & used_output);
static void pushDelayedResults(Output & used_output);
};

View File

@ -442,6 +442,14 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
command.if_exists = command_ast->if_exists;
return command;
}
else if (command_ast->type == ASTAlterCommand::MODIFY_SQL_SECURITY)
{
AlterCommand command;
command.ast = command_ast->clone();
command.type = AlterCommand::MODIFY_SQL_SECURITY;
command.sql_security = command_ast->sql_security->clone();
return command;
}
else
return {};
}
@ -854,6 +862,8 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
for (auto & index : metadata.secondary_indices)
rename_visitor.visit(index.definition_ast);
}
else if (type == MODIFY_SQL_SECURITY)
metadata.setDefiner(sql_security->as<ASTSQLSecurity &>());
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong parameter type in ALTER query");
}

View File

@ -50,6 +50,7 @@ struct AlterCommand
MODIFY_DATABASE_SETTING,
COMMENT_TABLE,
REMOVE_SAMPLE_BY,
MODIFY_SQL_SECURITY,
};
/// Which property user wants to remove from column
@ -147,6 +148,9 @@ struct AlterCommand
/// For MODIFY_QUERY
ASTPtr select = nullptr;
/// For MODIFY_SQL_SECURITY
ASTPtr sql_security = nullptr;
/// For MODIFY_REFRESH
ASTPtr refresh = nullptr;

View File

@ -682,7 +682,7 @@ std::pair<int32_t, int32_t> ReplicatedMergeTreeQueue::pullLogsToQueue(zkutil::Zo
ops.emplace_back(zkutil::makeSetRequest(
fs::path(replica_path) / "min_unprocessed_insert_time", toString(*min_unprocessed_insert_time_changed), -1));
auto responses = zookeeper->multi(ops);
auto responses = zookeeper->multi(ops, /* check_session_valid */ true);
/// Now we have successfully updated the queue in ZooKeeper. Update it in RAM.

View File

@ -879,7 +879,7 @@ std::pair<std::vector<String>, bool> ReplicatedMergeTreeSinkImpl<async_insert>::
fiu_do_on(FailPoints::replicated_merge_tree_commit_zk_fail_after_op, { zookeeper->forceFailureAfterOperation(); });
Coordination::Responses responses;
Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses); /// 1 RTT
Coordination::Error multi_code = zookeeper->tryMultiNoThrow(ops, responses, /* check_session_valid */ true); /// 1 RTT
if (multi_code == Coordination::Error::ZOK)
{

View File

@ -1,5 +1,8 @@
#include <Storages/StorageInMemoryMetadata.h>
#include <Access/AccessControl.h>
#include <Access/User.h>
#include <Common/HashTable/HashMap.h>
#include <Common/HashTable/HashSet.h>
#include <Common/quoteString.h>
@ -7,6 +10,7 @@
#include <Core/ColumnWithTypeAndName.h>
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeEnum.h>
#include <Interpreters/Context.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
@ -23,6 +27,7 @@ namespace ErrorCodes
extern const int NOT_FOUND_COLUMN_IN_BLOCK;
extern const int TYPE_MISMATCH;
extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
extern const int LOGICAL_ERROR;
}
StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata & other)
@ -41,6 +46,8 @@ StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata &
, settings_changes(other.settings_changes ? other.settings_changes->clone() : nullptr)
, select(other.select)
, refresh(other.refresh ? other.refresh->clone() : nullptr)
, definer(other.definer)
, sql_security_type(other.sql_security_type)
, comment(other.comment)
, metadata_version(other.metadata_version)
{
@ -71,6 +78,8 @@ StorageInMemoryMetadata & StorageInMemoryMetadata::operator=(const StorageInMemo
settings_changes.reset();
select = other.select;
refresh = other.refresh ? other.refresh->clone() : nullptr;
definer = other.definer;
sql_security_type = other.sql_security_type;
comment = other.comment;
metadata_version = other.metadata_version;
return *this;
@ -81,6 +90,69 @@ void StorageInMemoryMetadata::setComment(const String & comment_)
comment = comment_;
}
void StorageInMemoryMetadata::setDefiner(const ASTSQLSecurity & sql_security)
{
if (sql_security.definer)
definer = sql_security.definer->toString();
sql_security_type = sql_security.type;
}
UUID StorageInMemoryMetadata::getDefinerID(DB::ContextPtr context) const
{
if (!definer)
{
if (const auto definer_id = context->getUserID())
return *definer_id;
throw Exception(ErrorCodes::LOGICAL_ERROR, "No user in context for sub query execution.");
}
const auto & access_control = context->getAccessControl();
return access_control.getID<User>(*definer);
}
ContextMutablePtr StorageInMemoryMetadata::getSQLSecurityOverriddenContext(ContextPtr context) const
{
if (!sql_security_type.has_value())
return Context::createCopy(context);
if (sql_security_type == SQLSecurityType::INVOKER)
return Context::createCopy(context);
auto new_context = Context::createCopy(context->getGlobalContext());
new_context->setClientInfo(context->getClientInfo());
new_context->makeQueryContext();
const auto & database = context->getCurrentDatabase();
if (!database.empty())
new_context->setCurrentDatabase(database);
new_context->setInsertionTable(context->getInsertionTable(), context->getInsertionTableColumnNames());
new_context->setProgressCallback(context->getProgressCallback());
new_context->setProcessListElement(context->getProcessListElement());
if (context->getCurrentTransaction())
new_context->setCurrentTransaction(context->getCurrentTransaction());
if (context->getZooKeeperMetadataTransaction())
new_context->initZooKeeperMetadataTransaction(context->getZooKeeperMetadataTransaction());
if (sql_security_type == SQLSecurityType::NONE)
{
new_context->applySettingsChanges(context->getSettingsRef().changes());
return new_context;
}
new_context->setUser(getDefinerID(context));
auto changed_settings = context->getSettingsRef().changes();
new_context->clampToSettingsConstraints(changed_settings, SettingSource::QUERY);
new_context->applySettingsChanges(changed_settings);
return new_context;
}
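The three SQL SECURITY modes handled above can be summarized with a small Python sketch; it is illustrative only, CallerContext and build_view_context are made-up names, and the settings clamping of the DEFINER branch is not modelled.

from dataclasses import dataclass, field
from typing import Optional

@dataclass
class CallerContext:              # stand-in for the real query Context
    user: str
    settings: dict = field(default_factory=dict)

def build_view_context(caller: CallerContext,
                       sql_security: Optional[str],   # None, "INVOKER", "NONE" or "DEFINER"
                       definer: Optional[str]) -> CallerContext:
    # No SQL SECURITY clause, or INVOKER: run as the calling user, context copied as-is.
    if sql_security in (None, "INVOKER"):
        return CallerContext(caller.user, dict(caller.settings))
    # NONE: start from a fresh global-like context but keep the caller's setting changes.
    # ("default" stands in for whatever user the global context carries.)
    if sql_security == "NONE":
        return CallerContext("default", dict(caller.settings))
    # DEFINER: switch to the definer, or the caller when no DEFINER was given;
    # the real code additionally clamps the caller's settings to that user's constraints.
    return CallerContext(definer if definer else caller.user, dict(caller.settings))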
void StorageInMemoryMetadata::setColumns(ColumnsDescription columns_)
{
if (columns_.getAllPhysical().empty())

View File

@ -1,5 +1,7 @@
#pragma once
#include <Parsers/Access/ASTUserNameWithHost.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/IAST_fwd.h>
#include <Storages/ColumnDependency.h>
#include <Storages/ColumnsDescription.h>
@ -51,6 +53,14 @@ struct StorageInMemoryMetadata
/// Materialized view REFRESH parameters.
ASTPtr refresh;
/// DEFINER <user_name>. Allows to specify a definer of the table.
/// Supported for MaterializedView and View.
std::optional<String> definer;
/// SQL SECURITY <DEFINER | INVOKER | NONE>
/// Supported for MaterializedView and View.
std::optional<SQLSecurityType> sql_security_type;
String comment;
/// Version of metadata. Managed properly by ReplicatedMergeTree only
@ -105,6 +115,15 @@ struct StorageInMemoryMetadata
/// Get copy of current metadata with metadata_version_
StorageInMemoryMetadata withMetadataVersion(int32_t metadata_version_) const;
/// Sets a definer for the storage.
void setDefiner(const ASTSQLSecurity & sql_security);
UUID getDefinerID(ContextPtr context) const;
/// Returns a copy of the context with the correct user from SQL security options.
/// If the SQL security wasn't set, this is equivalent to `Context::createCopy(context)`.
    /// The context from this function must be used whenever views execute any read/write operations or subqueries.
ContextMutablePtr getSQLSecurityOverriddenContext(ContextPtr context) const;
/// Returns combined set of columns
const ColumnsDescription & getColumns() const;

View File

@ -233,7 +233,7 @@ public:
will_be);
}
zookeeper->multi(requests);
zookeeper->multi(requests, /* check_session_valid */ true);
}
};
@ -1210,7 +1210,7 @@ void StorageKeeperMap::mutate(const MutationCommands & commands, ContextPtr loca
}
Coordination::Responses responses;
auto status = client->tryMulti(delete_requests, responses);
auto status = client->tryMulti(delete_requests, responses, /* check_session_valid */ true);
if (status == Coordination::Error::ZOK)
return;

View File

@ -39,6 +39,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
extern const int INCORRECT_QUERY;
extern const int QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW;
extern const int TOO_MANY_MATERIALIZED_VIEWS;
}
@ -77,6 +78,11 @@ StorageMaterializedView::StorageMaterializedView(
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
if (query.sql_security)
storage_metadata.setDefiner(query.sql_security->as<ASTSQLSecurity &>());
if (storage_metadata.sql_security_type == SQLSecurityType::INVOKER)
throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW");
if (!query.select)
throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName());
@ -175,19 +181,28 @@ void StorageMaterializedView::read(
const size_t max_block_size,
const size_t num_streams)
{
auto context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context);
auto storage = getTargetTable();
auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);
auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout);
auto target_metadata_snapshot = storage->getInMemoryMetadataPtr();
auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, local_context);
auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, context);
if (query_info.order_optimizer)
query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, local_context);
query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, context);
storage->read(query_plan, column_names, target_storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
if (!getInMemoryMetadataPtr()->select.select_table_id.empty())
context->checkAccess(AccessType::SELECT, getInMemoryMetadataPtr()->select.select_table_id, column_names);
auto storage_id = storage->getStorageID();
/// We don't need to check access if the inner table was created automatically.
if (!has_inner_table && !storage_id.empty())
context->checkAccess(AccessType::SELECT, storage_id, column_names);
storage->read(query_plan, column_names, target_storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
if (query_plan.isInitialized())
{
auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage);
auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, context, processed_stage);
auto target_header = query_plan.getCurrentDataStream().header;
        /// No need to convert columns that do not exist in the MV
@ -222,11 +237,20 @@ void StorageMaterializedView::read(
SinkToStoragePtr StorageMaterializedView::write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr local_context, bool async_insert)
{
auto context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context);
auto storage = getTargetTable();
auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);
auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout);
auto metadata_snapshot = storage->getInMemoryMetadataPtr();
auto sink = storage->write(query, metadata_snapshot, local_context, async_insert);
auto storage_id = storage->getStorageID();
/// We don't need to check access if the inner table was created automatically.
if (!has_inner_table && !storage_id.empty())
{
auto query_sample_block = InterpreterInsertQuery::getSampleBlock(query->as<ASTInsertQuery &>(), storage, metadata_snapshot, context);
context->checkAccess(AccessType::INSERT, storage_id, query_sample_block.getNames());
}
auto sink = storage->write(query, metadata_snapshot, context, async_insert);
sink->addTableLock(lock);
return sink;
@ -297,7 +321,7 @@ bool StorageMaterializedView::optimize(
std::tuple<ContextMutablePtr, std::shared_ptr<ASTInsertQuery>> StorageMaterializedView::prepareRefresh() const
{
auto refresh_context = Context::createCopy(getContext());
auto refresh_context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext());
/// Generate a random query id.
refresh_context->setCurrentQueryId("");
@ -378,15 +402,24 @@ void StorageMaterializedView::checkAlterIsPossible(const AlterCommands & command
{
for (const auto & command : commands)
{
if (command.isCommentAlter())
if (command.type == AlterCommand::MODIFY_SQL_SECURITY)
{
if (command.sql_security->as<ASTSQLSecurity &>().type == SQLSecurityType::INVOKER)
throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW");
continue;
if (command.type == AlterCommand::MODIFY_QUERY)
}
else if (command.isCommentAlter())
continue;
if (command.type == AlterCommand::MODIFY_REFRESH && refresher)
else if (command.type == AlterCommand::MODIFY_QUERY)
continue;
else if (command.type == AlterCommand::MODIFY_REFRESH && refresher)
continue;
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}",
command.type, getName());
command.type, getName());
}
}
void StorageMaterializedView::checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const

View File

@ -1300,7 +1300,7 @@ bool StorageReplicatedMergeTree::removeTableNodesFromZooKeeper(zkutil::ZooKeeper
ops.emplace_back(zkutil::makeRemoveRequest(metadata_drop_lock->getPath(), -1));
ops.emplace_back(zkutil::makeRemoveRequest(fs::path(zookeeper_path) / "dropped", -1));
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_path, -1));
code = zookeeper->tryMulti(ops, responses);
code = zookeeper->tryMulti(ops, responses, /* check_session_valid */ true);
if (code == Coordination::Error::ZNONODE)
{
@ -1847,7 +1847,7 @@ MergeTreeData::DataPartsVector StorageReplicatedMergeTree::checkPartChecksumsAnd
Coordination::SimpleFaultInjection fault(getSettings()->fault_probability_before_part_commit,
getSettings()->fault_probability_after_part_commit, "part commit");
ThreadFuzzer::maybeInjectSleep();
e = zookeeper->tryMulti(ops, responses);
e = zookeeper->tryMulti(ops, responses, /* check_session_valid */ true);
}
if (e == Coordination::Error::ZOK)
{
@ -2904,7 +2904,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo
/// Check that log pointer of source replica didn't changed while we read queue entries
ops.push_back(zkutil::makeCheckRequest(fs::path(source_path) / "log_pointer", log_pointer_stat.version));
auto rc = zookeeper->tryMulti(ops, responses);
auto rc = zookeeper->tryMulti(ops, responses, /* check_session_valid */ true);
if (rc == Coordination::Error::ZOK)
{
@ -3259,7 +3259,7 @@ void StorageReplicatedMergeTree::cloneMetadataIfNeeded(const String & source_rep
ops.emplace_back(zkutil::makeCheckRequest(source_path + "/metadata", metadata_stat.version));
ops.emplace_back(zkutil::makeCheckRequest(source_path + "/columns", columns_stat.version));
Coordination::Error code = zookeeper->tryMulti(ops, responses);
Coordination::Error code = zookeeper->tryMulti(ops, responses, /* check_session_valid */ true);
if (code == Coordination::Error::ZOK)
break;
else if (code == Coordination::Error::ZBADVERSION)
@ -4134,7 +4134,7 @@ void StorageReplicatedMergeTree::removePartAndEnqueueFetch(const String & part_n
zkutil::CreateMode::PersistentSequential));
Coordination::Responses results;
auto rc = zookeeper->tryMulti(ops, results);
auto rc = zookeeper->tryMulti(ops, results, /* check_session_valid */ true);
if (rc == Coordination::Error::ZBADVERSION)
{
@ -5849,7 +5849,7 @@ bool StorageReplicatedMergeTree::executeMetadataAlter(const StorageReplicatedMer
}
else
{
zookeeper->multi(requests);
zookeeper->multi(requests, /* check_session_valid */ true);
}
{
@ -10010,7 +10010,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP
ThreadFuzzer::maybeInjectSleep();
Coordination::Responses responses;
auto code = zookeeper->tryMulti(ops, responses);
auto code = zookeeper->tryMulti(ops, responses, /* check_session_valid */ true);
if (code == Coordination::Error::ZOK)
{
transaction.commit();

View File

@ -12,6 +12,7 @@
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Storages/AlterCommands.h>
#include <Storages/StorageView.h>
#include <Storages/StorageFactory.h>
#include <Storages/SelectQueryDescription.h>
@ -35,6 +36,7 @@ namespace ErrorCodes
{
extern const int INCORRECT_QUERY;
extern const int LOGICAL_ERROR;
extern const int NOT_IMPLEMENTED;
}
@ -90,10 +92,10 @@ bool hasJoin(const ASTSelectWithUnionQuery & ast)
/** There are no limits on the maximum size of the result for the view,
  * since the result of the view is not the result of the entire query.
*/
ContextPtr getViewContext(ContextPtr context)
ContextPtr getViewContext(ContextPtr context, const StorageSnapshotPtr & storage_snapshot)
{
auto view_context = Context::createCopy(context);
Settings view_settings = context->getSettings();
auto view_context = storage_snapshot->metadata->getSQLSecurityOverriddenContext(context);
Settings view_settings = view_context->getSettings();
view_settings.max_result_rows = 0;
view_settings.max_result_bytes = 0;
view_settings.extremes = false;
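Stated as plain Python over a dict of settings (a sketch; the dict stands in for the real Settings object and only the three overrides visible above are modelled), the adjustment amounts to:

def view_settings(caller_settings: dict) -> dict:
    # The inner query of a view must not be cut off by result-size limits,
    # because its result is only an intermediate stage of the outer query.
    s = dict(caller_settings)
    s["max_result_rows"] = 0      # 0 means "unlimited"
    s["max_result_bytes"] = 0
    s["extremes"] = False
    return s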
@ -122,6 +124,8 @@ StorageView::StorageView(
storage_metadata.setColumns(columns_);
storage_metadata.setComment(comment);
if (query.sql_security)
storage_metadata.setDefiner(query.sql_security->as<ASTSQLSecurity &>());
if (!query.select)
throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName());
@ -160,13 +164,13 @@ void StorageView::read(
if (context->getSettingsRef().allow_experimental_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context), options);
InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options);
interpreter.addStorageLimits(*query_info.storage_limits);
query_plan = std::move(interpreter).extractQueryPlan();
}
else
{
InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context), options, column_names);
InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names);
interpreter.addStorageLimits(*query_info.storage_limits);
interpreter.buildQueryPlan(query_plan);
}
@ -282,6 +286,15 @@ ASTPtr StorageView::restoreViewName(ASTSelectQuery & select_query, const ASTPtr
return subquery->children[0];
}
void StorageView::checkAlterIsPossible(const AlterCommands & commands, ContextPtr /* local_context */) const
{
for (const auto & command : commands)
{
if (!command.isCommentAlter() && command.type != AlterCommand::MODIFY_SQL_SECURITY)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}", command.type, getName());
}
}
void registerStorageView(StorageFactory & factory)
{
factory.registerStorage("View", [](const StorageFactory::Arguments & args)

View File

@ -26,6 +26,8 @@ public:
bool supportsSampling() const override { return true; }
bool supportsFinal() const override { return true; }
void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override;
void read(
QueryPlan & query_plan,
const Names & column_names,

View File

@ -22,6 +22,7 @@ ColumnsDescription StorageSystemBackups::getColumnsDescription()
{"id", std::make_shared<DataTypeString>(), "Operation ID, can be either passed via SETTINGS id=... or be randomly generated UUID."},
{"name", std::make_shared<DataTypeString>(), "Operation name, a string like `Disk('backups', 'my_backup')`"},
{"base_backup_name", std::make_shared<DataTypeString>(), "Base Backup Operation name, a string like `Disk('backups', 'my_base_backup')`"},
{"query_id", std::make_shared<DataTypeString>(), "Query ID of a query that started backup."},
{"status", std::make_shared<DataTypeEnum8>(getBackupStatusEnumValues()), "Status of backup or restore operation."},
{"error", std::make_shared<DataTypeString>(), "The error message if any."},
{"start_time", std::make_shared<DataTypeDateTime>(), "The time when operation started."},
@ -44,6 +45,7 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con
auto & column_id = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_name = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_base_backup_name = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_query_id = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_status = assert_cast<ColumnInt8 &>(*res_columns[column_index++]);
auto & column_error = assert_cast<ColumnString &>(*res_columns[column_index++]);
auto & column_start_time = assert_cast<ColumnUInt32 &>(*res_columns[column_index++]);
@ -62,6 +64,7 @@ void StorageSystemBackups::fillData(MutableColumns & res_columns, ContextPtr con
column_id.insertData(info.id.data(), info.id.size());
column_name.insertData(info.name.data(), info.name.size());
column_base_backup_name.insertData(info.base_backup_name.data(), info.base_backup_name.size());
column_query_id.insertData(info.query_id.data(), info.query_id.size());
column_status.insertValue(static_cast<Int8>(info.status));
column_error.insertData(info.error_message.data(), info.error_message.size());
column_start_time.insertValue(static_cast<UInt32>(std::chrono::system_clock::to_time_t(info.start_time)));

View File

@ -29,6 +29,7 @@ namespace
VIEW,
COLUMN,
NAMED_COLLECTION,
USER_NAME,
};
DataTypeEnum8::Values getLevelEnumValues()
@ -41,6 +42,7 @@ namespace
enum_values.emplace_back("VIEW", static_cast<Int8>(VIEW));
enum_values.emplace_back("COLUMN", static_cast<Int8>(COLUMN));
enum_values.emplace_back("NAMED_COLLECTION", static_cast<Int8>(NAMED_COLLECTION));
enum_values.emplace_back("USER_NAME", static_cast<Int8>(USER_NAME));
return enum_values;
}
}

View File

@ -35,8 +35,9 @@ static constexpr std::string_view schemata = R"(
`DEFAULT_CHARACTER_SET_SCHEMA` Nullable(String),
`DEFAULT_CHARACTER_SET_NAME` Nullable(String),
`SQL_PATH` Nullable(String)
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
name AS catalog_name,
name AS schema_name,
'default' AS schema_owner,
@ -73,8 +74,9 @@ static constexpr std::string_view tables = R"(
`DATA_LENGTH` Nullable(UInt64),
`TABLE_COLLATION` Nullable(String),
`TABLE_COMMENT` Nullable(String)
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
database AS table_catalog,
database AS table_schema,
name AS table_name,
@ -122,8 +124,9 @@ static constexpr std::string_view views = R"(
`IS_TRIGGER_UPDATABLE` Enum8('NO' = 0, 'YES' = 1),
`IS_TRIGGER_DELETABLE` Enum8('NO' = 0, 'YES' = 1),
`IS_TRIGGER_INSERTABLE_INTO` Enum8('NO' = 0, 'YES' = 1)
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
database AS table_catalog,
database AS table_schema,
name AS table_name,
@ -203,8 +206,9 @@ static constexpr std::string_view columns = R"(
`EXTRA` Nullable(String),
`COLUMN_COMMENT` String,
`COLUMN_TYPE` String
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
database AS table_catalog,
database AS table_schema,
table AS table_name,
@ -291,8 +295,9 @@ static constexpr std::string_view key_column_usage = R"(
`REFERENCED_TABLE_SCHEMA` Nullable(String),
`REFERENCED_TABLE_NAME` Nullable(String),
`REFERENCED_COLUMN_NAME` Nullable(String)
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
'def' AS constraint_catalog,
database AS constraint_schema,
'PRIMARY' AS constraint_name,
@ -346,8 +351,9 @@ static constexpr std::string_view referential_constraints = R"(
`DELETE_RULE` String,
`TABLE_NAME` String,
`REFERENCED_TABLE_NAME` String
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
'' AS constraint_catalog,
NULL AS constraint_name,
'' AS constraint_schema,
@ -412,8 +418,9 @@ static constexpr std::string_view statistics = R"(
`INDEX_COMMENT` String,
`IS_VISIBLE` String,
`EXPRESSION` Nullable(String)
) AS
SELECT
)
SQL SECURITY INVOKER
AS SELECT
'' AS table_catalog,
'' AS table_schema,
'' AS table_name,

View File

@ -26,7 +26,7 @@ try
auto config = processor.loadConfig().configuration;
String root_path = argv[2];
zkutil::ZooKeeper zk(*config, zkutil::getZooKeeperConfigName(*config), nullptr);
auto zk = zkutil::ZooKeeper::createWithoutKillingPreviousSessions(*config, zkutil::getZooKeeperConfigName(*config), nullptr);
String temp_path = root_path + "/temp";
String blocks_path = root_path + "/block_numbers";
@ -34,7 +34,7 @@ try
Stopwatch total_timer;
Stopwatch timer;
EphemeralLocksInAllPartitions locks(blocks_path, "test_lock-", temp_path, zk);
EphemeralLocksInAllPartitions locks(blocks_path, "test_lock-", temp_path, *zk);
std::cerr << "Locked, elapsed: " << timer.elapsedSeconds() << std::endl;
for (const auto & lock : locks.getLocks())

View File

@ -29,7 +29,7 @@ try
auto config = processor.loadConfig().configuration;
String zookeeper_path = argv[2];
auto zookeeper = std::make_shared<zkutil::ZooKeeper>(*config, zkutil::getZooKeeperConfigName(*config), nullptr);
auto zookeeper = zkutil::ZooKeeper::createWithoutKillingPreviousSessions(*config, zkutil::getZooKeeperConfigName(*config), nullptr);
std::unordered_map<String, std::set<Int64>> current_inserts;

View File

@ -1,4 +1,4 @@
00223_shard_distributed_aggregation_memory_efficien
00223_shard_distributed_aggregation_memory_efficient
00717_merge_and_distributed
00725_memory_tracking
01062_pm_all_join_with_block_continuation

View File

@ -645,12 +645,18 @@ class CiCache:
return {}
poll_interval_sec = 300
TIMEOUT = 3600
MAX_ROUNDS_TO_WAIT = 6
MAX_JOB_NUM_TO_WAIT = 3
await_finished: Dict[str, List[int]] = {}
round_cnt = 0
while len(jobs_with_params) > 4 and round_cnt < 5:
while (
len(jobs_with_params) > MAX_JOB_NUM_TO_WAIT
and round_cnt < MAX_ROUNDS_TO_WAIT
):
round_cnt += 1
GHActions.print_in_group(
f"Wait pending jobs, round [{round_cnt}]:", list(jobs_with_params)
f"Wait pending jobs, round [{round_cnt}/{MAX_ROUNDS_TO_WAIT}]:",
list(jobs_with_params),
)
            # This is the initial approach to waiting for pending jobs:
            # start waiting for the next TIMEOUT seconds if there are more than MAX_JOB_NUM_TO_WAIT jobs left to wait for.
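Reduced to its essentials, the waiting strategy is a bounded two-level polling loop; the sketch below is illustrative only, with jobs_still_pending standing in for the cache lookup the real CiCache performs, and the constants mirroring the ones introduced in this change.

import time

POLL_INTERVAL_SEC = 300
TIMEOUT = 3600
MAX_ROUNDS_TO_WAIT = 6
MAX_JOB_NUM_TO_WAIT = 3

def wait_for_pending(pending_jobs, jobs_still_pending):
    """Poll until few enough jobs remain or the round budget is exhausted."""
    round_cnt = 0
    while len(pending_jobs) > MAX_JOB_NUM_TO_WAIT and round_cnt < MAX_ROUNDS_TO_WAIT:
        round_cnt += 1
        deadline = time.time() + TIMEOUT
        while time.time() < deadline and len(pending_jobs) > MAX_JOB_NUM_TO_WAIT:
            time.sleep(POLL_INTERVAL_SEC)
            pending_jobs = jobs_still_pending(pending_jobs)
    return pending_jobs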

View File

@ -65,9 +65,13 @@ class JobNames(metaclass=WithIter):
STATELESS_TEST_TSAN = "Stateless tests (tsan)"
STATELESS_TEST_MSAN = "Stateless tests (msan)"
STATELESS_TEST_UBSAN = "Stateless tests (ubsan)"
STATELESS_TEST_ANALYZER_RELEASE = "Stateless tests (release, analyzer)"
STATELESS_TEST_DB_REPL_RELEASE = "Stateless tests (release, DatabaseReplicated)"
STATELESS_TEST_S3_RELEASE = "Stateless tests (release, s3 storage)"
STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE = (
"Stateless tests (release, analyzer, s3, DatabaseReplicated)"
)
# merged into STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE:
# STATELESS_TEST_ANALYZER_RELEASE = "Stateless tests (release, analyzer)"
# STATELESS_TEST_DB_REPL_RELEASE = "Stateless tests (release, DatabaseReplicated)"
# STATELESS_TEST_S3_RELEASE = "Stateless tests (release, s3 storage)"
STATELESS_TEST_S3_DEBUG = "Stateless tests (debug, s3 storage)"
STATELESS_TEST_S3_TSAN = "Stateless tests (tsan, s3 storage)"
STATELESS_TEST_FLAKY_ASAN = "Stateless tests flaky check (asan)"
@ -968,17 +972,10 @@ CI_CONFIG = CIConfig(
JobNames.STATELESS_TEST_AARCH64: TestConfig(
Build.PACKAGE_AARCH64, job_config=JobConfig(**statless_test_common_params) # type: ignore
),
JobNames.STATELESS_TEST_ANALYZER_RELEASE: TestConfig(
Build.PACKAGE_RELEASE, job_config=JobConfig(**statless_test_common_params) # type: ignore
),
JobNames.STATELESS_TEST_DB_REPL_RELEASE: TestConfig(
JobNames.STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE: TestConfig(
Build.PACKAGE_RELEASE,
job_config=JobConfig(num_batches=4, **statless_test_common_params), # type: ignore
),
JobNames.STATELESS_TEST_S3_RELEASE: TestConfig(
Build.PACKAGE_RELEASE,
job_config=JobConfig(num_batches=2, **statless_test_common_params), # type: ignore
),
JobNames.STATELESS_TEST_S3_DEBUG: TestConfig(
Build.PACKAGE_DEBUG,
job_config=JobConfig(num_batches=6, **statless_test_common_params), # type: ignore
@ -1016,7 +1013,7 @@ CI_CONFIG = CIConfig(
),
JobNames.INTEGRATION_TEST_ASAN: TestConfig(
Build.PACKAGE_ASAN,
job_config=JobConfig(num_batches=4, **integration_test_common_params), # type: ignore
job_config=JobConfig(num_batches=4, **integration_test_common_params, release_only=True), # type: ignore
),
JobNames.INTEGRATION_TEST_ASAN_ANALYZER: TestConfig(
Build.PACKAGE_ASAN,
@ -1031,12 +1028,9 @@ CI_CONFIG = CIConfig(
# add [run_by_label="test arm"] to not run in regular pr workflow by default
job_config=JobConfig(num_batches=6, **integration_test_common_params, run_by_label="test arm"), # type: ignore
),
# FIXME: currently no wf has this job. Try to enable
# "Integration tests (msan)": TestConfig(Build.PACKAGE_MSAN, job_config=JobConfig(num_batches=6, **integration_test_common_params) # type: ignore
# ),
JobNames.INTEGRATION_TEST: TestConfig(
Build.PACKAGE_RELEASE,
job_config=JobConfig(num_batches=4, **integration_test_common_params), # type: ignore
job_config=JobConfig(num_batches=4, **integration_test_common_params, release_only=True), # type: ignore
),
JobNames.INTEGRATION_TEST_FLAKY: TestConfig(
Build.PACKAGE_ASAN, job_config=JobConfig(pr_only=True, **integration_test_common_params) # type: ignore
@ -1145,7 +1139,7 @@ REQUIRED_CHECKS = [
JobNames.UNIT_TEST_TSAN,
JobNames.UNIT_TEST_UBSAN,
JobNames.INTEGRATION_TEST_ASAN_ANALYZER,
JobNames.STATELESS_TEST_ANALYZER_RELEASE,
JobNames.STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE,
]

View File

@ -465,10 +465,10 @@ def test_user_specific_auth(start_cluster):
node.query("CREATE TABLE specific_auth (col UInt64) ENGINE=Memory")
assert "Access Denied" in node.query_and_get_error(
assert "Access" in node.query_and_get_error(
"BACKUP TABLE specific_auth TO S3('http://minio1:9001/root/data/backups/limited/backup1.zip')"
)
assert "Access Denied" in node.query_and_get_error(
assert "Access" in node.query_and_get_error(
"BACKUP TABLE specific_auth TO S3('http://minio1:9001/root/data/backups/limited/backup1.zip')",
user="regularuser",
)
@ -491,7 +491,7 @@ def test_user_specific_auth(start_cluster):
user="superuser2",
)
assert "Access Denied" in node.query_and_get_error(
assert "Access" in node.query_and_get_error(
"RESTORE TABLE specific_auth FROM S3('http://minio1:9001/root/data/backups/limited/backup1.zip')",
user="regularuser",
)

View File

@ -195,6 +195,10 @@ def test_create_table():
f"DeltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '{password}')",
"DNS_ERROR",
),
f"S3Queue('http://minio1:9001/root/data/', 'CSV')",
f"S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip')",
f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV')",
f"S3Queue('http://minio1:9001/root/data/', 'minio', '{password}', 'CSV', 'gzip')",
]
def make_test_case(i):
@ -254,6 +258,10 @@ def test_create_table():
"CREATE TABLE table14 (x int) ENGINE = S3('http://minio1:9001/root/data/test9.csv.gz', 'NOSIGN', 'CSV', 'gzip')",
"CREATE TABLE table15 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test10.csv.gz', 'minio', '[HIDDEN]')",
"CREATE TABLE table16 (`x` int) ENGINE = DeltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')",
"CREATE TABLE table17 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV')",
"CREATE TABLE table18 (x int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'CSV', 'gzip')",
"CREATE TABLE table19 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV')",
"CREATE TABLE table20 (`x` int) ENGINE = S3Queue('http://minio1:9001/root/data/', 'minio', '[HIDDEN]', 'CSV', 'gzip')",
],
must_not_contain=[password],
)

View File

@ -723,6 +723,7 @@ def test_materialized_view(started_cluster):
pg_manager.execute(f"INSERT INTO test_table SELECT 3, 4")
check_tables_are_synchronized(instance, "test_table")
assert "1\t2\n3\t4" == instance.query("SELECT * FROM mv ORDER BY 1, 2").strip()
instance.query("DROP VIEW mv")
pg_manager.drop_materialized_db()

View File

@ -0,0 +1,3 @@
<clickhouse>
<format_alter_operations_with_parentheses>1</format_alter_operations_with_parentheses>
</clickhouse>

View File

@ -0,0 +1,45 @@
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
"node",
main_configs=[
"configs/format_alter_operations_with_parentheses.xml",
],
)
@pytest.fixture(scope="module", autouse=True)
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_alter():
INPUT = """
SELECT '--- Alter commands in parens';
SELECT formatQuery('ALTER TABLE a (MODIFY TTL expr GROUP BY some_key), (ADD COLUMN a Int64)');
SELECT formatQuery('ALTER TABLE a (MODIFY TTL expr TO VOLUME \\'vol1\\', expr2 + INTERVAL 2 YEAR TO VOLUME \\'vol2\\'), (DROP COLUMN c)');
SELECT '--- Check only consistent parens around alter commands are accepted';
SELECT formatQuery('ALTER TABLE a (DROP COLUMN b), DROP COLUMN c'); -- { serverError SYNTAX_ERROR }
SELECT formatQuery('ALTER TABLE a DROP COLUMN b, (DROP COLUMN c)'); -- { serverError SYNTAX_ERROR }
SELECT formatQuery('ALTER TABLE a (DROP COLUMN b), (DROP COLUMN c)');
SELECT formatQuery('ALTER TABLE a DROP COLUMN b, DROP COLUMN c'); -- Make sure it is backward compatible
"""
EXPECTED_OUTPUT = """--- Alter commands in parens
ALTER TABLE a\\n (MODIFY TTL expr GROUP BY some_key),\\n (ADD COLUMN `a` Int64)
ALTER TABLE a\\n (MODIFY TTL expr TO VOLUME \\'vol1\\', expr2 + toIntervalYear(2) TO VOLUME \\'vol2\\'),\\n (DROP COLUMN c)
--- Check only consistent parens around alter commands are accepted
ALTER TABLE a\\n (DROP COLUMN b),\\n (DROP COLUMN c)
ALTER TABLE a\\n (DROP COLUMN b),\\n (DROP COLUMN c)
"""
result = node.query(INPUT)
assert result == EXPECTED_OUTPUT

View File

@ -1 +1 @@
CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n)
CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n)\nSQL SECURITY INVOKER\nAS SELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n)

View File

@ -6,8 +6,9 @@ CREATE MATERIALIZED VIEW default.t_mv_00751
)
ENGINE = MergeTree
ORDER BY date
SETTINGS index_granularity = 8192 AS
SELECT
SETTINGS index_granularity = 8192
DEFINER = default SQL SECURITY DEFINER
AS SELECT
date,
platform,
app

Some files were not shown because too many files have changed in this diff.