Merge remote-tracking branch 'blessed/master' into parallel_replicas_cte_2

This commit is contained in:
Raúl Marín 2024-02-26 14:31:20 +01:00
commit 2fa04bb37c
166 changed files with 3849 additions and 1473 deletions

2
contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit 5bb3a0e8257bacd65b099cb1b7239bd6b9a2c477
Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1

2
contrib/aws vendored

@ -1 +1 @@
Subproject commit 9eb5097a0abfa837722cca7a5114a25837817bf2
Subproject commit 5f0542b3ad7eef25b0540d37d778207e0345ea8f

2
contrib/rapidjson vendored

@ -1 +1 @@
Subproject commit a9bc56c9165f1dbbbcada64221bd3a59042c5b95
Subproject commit 800ca2f38fc3b387271d9e1926fcfc9070222104

View File

@ -387,6 +387,11 @@ if [ -f core.zst ]; then
fi
rg --text -F '<Fatal>' server.log > fatal.log ||:
FATAL_LINK=''
if [ -s fatal.log ]; then
FATAL_LINK='<a href="fatal.log">fatal.log</a>'
fi
dmesg -T > dmesg.log ||:
zstd --threads=0 --rm server.log
@ -419,6 +424,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
<a href="main.log">main.log</a>
<a href="dmesg.log">dmesg.log</a>
${CORE_LINK}
${FATAL_LINK}
</p>
<table>
<tr>

View File

@ -88,6 +88,7 @@ rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml
rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml
@ -134,6 +135,7 @@ rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
rm /etc/clickhouse-server/config.d/storage_conf_02963.xml
rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml
rm /etc/clickhouse-server/config.d/block_number.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
rm /etc/clickhouse-server/users.d/s3_cache_new.xml

View File

@ -37,7 +37,7 @@ sudo xcode-select --install
``` bash
brew update
brew install ccache cmake ninja libtool gettext llvm gcc binutils grep findutils
brew install ccache cmake ninja libtool gettext llvm gcc binutils grep findutils nasm
```
## Checkout ClickHouse Sources {#checkout-clickhouse-sources}

View File

@ -10,7 +10,7 @@ Allows to connect to databases on a remote [PostgreSQL](https://www.postgresql.o
Gives the real-time access to table list and table structure from remote PostgreSQL with the help of `SHOW TABLES` and `DESCRIBE TABLE` queries.
Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `use_table_cache` parameter (see the Engine Parameters below) it set to `1`, the table structure is cached and not checked for being modified, but can be updated with `DETACH` and `ATTACH` queries.
Supports table structure modifications (`ALTER TABLE ... ADD|DROP COLUMN`). If `use_table_cache` parameter (see the Engine Parameters below) is set to `1`, the table structure is cached and not checked for being modified, but can be updated with `DETACH` and `ATTACH` queries.
## Creating a Database {#creating-a-database}

View File

@ -4279,6 +4279,41 @@ Result:
└─────┴─────┴───────┘
```
## enable_order_by_all {#enable-order-by-all}
Enables or disables sorting with `ORDER BY ALL` syntax, see [ORDER BY](../../sql-reference/statements/select/order-by.md).
Possible values:
- 0 — Disable ORDER BY ALL.
- 1 — Enable ORDER BY ALL.
Default value: `1`.
**Example**
Query:
```sql
CREATE TABLE TAB(C1 Int, C2 Int, ALL Int) ENGINE=Memory();
INSERT INTO TAB VALUES (10, 20, 30), (20, 20, 10), (30, 10, 20);
SELECT * FROM TAB ORDER BY ALL; -- returns an error that ALL is ambiguous
SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all = 0;
```
Result:
```text
┌─C1─┬─C2─┬─ALL─┐
│ 20 │ 20 │ 10 │
│ 30 │ 10 │ 20 │
│ 10 │ 20 │ 30 │
└────┴────┴─────┘
```
## splitby_max_substrings_includes_remaining_string {#splitby_max_substrings_includes_remaining_string}
Controls whether function [splitBy*()](../../sql-reference/functions/splitting-merging-functions.md) with argument `max_substrings` > 0 will include the remaining string in the last element of the result array.
@ -5345,24 +5380,6 @@ SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as
Default value: `false`.
## default_normal_view_sql_security {#default_normal_view_sql_security}
Allows to set default `SQL SECURITY` option while creating a normal view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `INVOKER`.
## default_materialized_view_sql_security {#default_materialized_view_sql_security}
Allows to set a default value for SQL SECURITY option when creating a materialized view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `DEFINER`.
## default_view_definer {#default_view_definer}
Allows to set default `DEFINER` option while creating a view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security).
The default value is `CURRENT_USER`.
## max_partition_size_to_drop
Restriction on dropping partitions in query time. The value 0 means that you can drop partitions without any restrictions.

View File

@ -0,0 +1,32 @@
---
slug: /en/operations/system-tables/settings_changes
---
# settings_changes
Contains information about setting changes in previous ClickHouse versions.
Columns:
- `version` ([String](../../sql-reference/data-types/string.md)) — The ClickHouse version in which settings were changed
- `changes` ([Array](../../sql-reference/data-types/array.md) of [Tuple](../../sql-reference/data-types/tuple.md)) — A description of the setting changes: (setting name, previous value, new value, reason for the change)
**Example**
``` sql
SELECT *
FROM system.settings_changes
WHERE version = '23.5'
FORMAT Vertical
```
``` text
Row 1:
──────
version: 23.5
changes: [('input_format_parquet_preserve_order','1','0','Allow Parquet reader to reorder rows for better parallelism.'),('parallelize_output_from_storages','0','1','Allow parallelism when executing queries that read from file/url/s3/etc. This may reorder rows.'),('use_with_fill_by_sorting_prefix','0','1','Columns preceding WITH FILL columns in ORDER BY clause form sorting prefix. Rows with different values in sorting prefix are filled independently'),('output_format_parquet_compliant_nested_types','0','1','Change an internal field name in output Parquet file schema.')]
```
**See also**
- [Settings](../../operations/settings/index.md#session-settings-intro)
- [system.settings](settings.md)

View File

@ -13,9 +13,7 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia
Syntax:
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ...
```
Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause.
@ -54,9 +52,7 @@ SELECT * FROM view(column1=value1, column2=value2 ...)
## Materialized View
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
```
:::tip
@ -95,49 +91,6 @@ Views look the same as normal tables. For example, they are listed in the result
To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Although `DROP TABLE` works for VIEWs as well.
## SQL security {#sql_security}
`DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query.
`SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause.
The following table will explain which rights are required for which user in order to select from view.
Note that regardless of the SQL security option, in every case it is still required to have `GRANT SELECT ON <view>` in order to read from it.
| SQL security option | View | Materialized View |
|---------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
| `DEFINER alice` | `alice` must have a `SELECT` grant for the view's source table. | `alice` must have a `SELECT` grant for the view's source table and an `INSERT` grant for the view's target table. |
| `INVOKER` | User must have a `SELECT` grant for the view's source table. | `SQL SECURITY INVOKER` can't be specified for materialized views. |
| `NONE` | - | - |
:::note
`SQL SECURITY NONE` is a deprecated option. Any user with the rights to create views with `SQL SECURITY NONE` will be able to execute any arbitrary query.
Thus, it is required to have `GRANT ALLOW SQL SECURITY NONE TO <user>` in order to create a view with this option.
:::
If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used:
- `SQL SECURITY`: `INVOKER` for normal views and `DEFINER` for materialized views ([configurable by settings](../../../operations/settings/settings.md#default_normal_view_sql_security))
- `DEFINER`: `CURRENT_USER` ([configurable by settings](../../../operations/settings/settings.md#default_view_definer))
If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view.
To change SQL security for an existing view, use
```sql
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
### Examples sql security
```sql
CREATE test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```
```sql
CREATE test_view
SQL SECURITY INVOKER
AS SELECT ...
```
## Live View [Deprecated]
This feature is deprecated and will be removed in the future.

View File

@ -114,7 +114,6 @@ Hierarchy of privileges:
- `ALTER VIEW`
- `ALTER VIEW REFRESH`
- `ALTER VIEW MODIFY QUERY`
- `ALTER VIEW MODIFY SQL SECURITY`
- [CREATE](#grant-create)
- `CREATE DATABASE`
- `CREATE TABLE`
@ -308,7 +307,6 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries
- `ALTER VIEW` Level: `GROUP`
- `ALTER VIEW REFRESH`. Level: `VIEW`. Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW`
- `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY`
- `ALTER VIEW MODIFY SQL SECURITY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY SQL SECURITY`
Examples of how this hierarchy is treated:
@ -411,7 +409,6 @@ Allows a user to execute queries that manage users, roles and row policies.
- `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY`
- `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA`
- `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE`
- `ALLOW SQL SECURITY NONE`. Level: `GLOBAL`. Aliases: `CREATE SQL SECURITY NONE`, `SQL SECURITY NONE`, `SECURITY NONE`
The `ROLE ADMIN` privilege allows a user to assign and revoke any roles including those which are not assigned to the user with the admin option.

View File

@ -9,9 +9,10 @@ The `ORDER BY` clause contains
- a list of expressions, e.g. `ORDER BY visits, search_phrase`,
- a list of numbers referring to columns in the `SELECT` clause, e.g. `ORDER BY 2, 1`, or
- `*` (without other expressions or numbers) which means all columns of the `SELECT` clause: `ORDER BY *`.
- `ALL` which means all columns of the `SELECT` clause, e.g. `ORDER BY ALL`.
To disable sorting by column numbers, set setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments) = 0.
To disable sorting by `ALL`, set setting [enable_order_by_all](../../../operations/settings/settings.md#enable-order-by-all) = 0.
The `ORDER BY` clause can be attributed by a `DESC` (descending) or `ASC` (ascending) modifier which determines the sorting direction.
Unless an explicit sort order is specified, `ASC` is used by default.

View File

@ -11,11 +11,11 @@ Creates a temporary [Merge](../../engines/table-engines/special/merge.md) table.
**Syntax**
```sql
merge('db_name', 'tables_regexp')
merge(['db_name',] 'tables_regexp')
```
**Arguments**
- `db_name` — Possible values:
- `db_name` — Possible values (optional, default is `currentDatabase()`):
- database name,
- constant expression that returns a string with a database name, for example, `currentDatabase()`,
- `REGEXP(expression)`, where `expression` is a regular expression to match the DB names.

View File

@ -11,9 +11,7 @@ sidebar_label: "Представление"
## Обычные представления {#normal}
``` sql
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ...
```
Обычные представления не хранят никаких данных, они выполняют чтение данных из другой таблицы при каждом доступе. Другими словами, обычное представление — это не что иное, как сохраненный запрос. При чтении данных из представления этот сохраненный запрос используется как подзапрос в секции [FROM](../../../sql-reference/statements/select/from.md).
@ -39,9 +37,7 @@ SELECT a, b, c FROM (SELECT ...)
## Материализованные представления {#materialized}
``` sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE]
[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }]
AS SELECT ...
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ...
```
Материализованные (MATERIALIZED) представления хранят данные, преобразованные соответствующим запросом [SELECT](../../../sql-reference/statements/select/index.md).
@ -70,52 +66,6 @@ AS SELECT ...
Чтобы удалить представление, следует использовать [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Впрочем, `DROP TABLE` тоже работает для представлений.
## SQL безопасность {#sql_security}
Параметры `DEFINER` и `SQL SECURITY` позволяют задать правило от имени какого пользователя будут выполняться запросы к таблицам, на которые ссылается представление.
Для `SQL SECURITY` допустимо три значения: `DEFINER`, `INVOKER`, или `NONE`.
Для `DEFINER` можно указать имя любого существующего пользователя или же `CURRENT_USER`.
Далее приведена таблица, объясняющая какие права необходимы каким пользователям при заданных параметрах SQL безопасности.
Обратите внимание, что, в независимости от заданных параметров SQL безопасности,
у пользователя должно быть право `GRANT SELECT ON <view>` для чтения из представления.
| SQL security option | View | Materialized View |
|---------------------|----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
| `DEFINER alice` | У `alice` должно быть право `SELECT` на таблицу-источник. | У `alice` должны быть права `SELECT` на таблицу-источник и `INSERT` на таблицу-назначение. |
| `INVOKER` | У пользователя выполняющего запрос к представлению должно быть право `SELECT` на таблицу-источник. | Тип `SQL SECURITY INVOKER` не может быть указан для материализованных представлений. |
| `NONE` | - | - |
:::note
Тип `SQL SECURITY NONE` не безопасен для использования. Любой пользователь с правом создавать представления с `SQL SECURITY NONE` сможет исполнять любые запросы без проверки прав.
По умолчанию, у пользователей нет прав указывать `SQL SECURITY NONE`, однако, при необходимости, это право можно выдать с помощью `GRANT ALLOW SQL SECURITY NONE TO <user>`.
:::
Если `DEFINER`/`SQL SECURITY` не указан, будут использованы значения по умолчанию:
- `SQL SECURITY`: `INVOKER` для обычных представлений и `DEFINER` для материализованных ([изменяется в настройках](../../../operations/settings/settings.md#default_normal_view_sql_security))
- `DEFINER`: `CURRENT_USER` ([изменяется в настройках](../../../operations/settings/settings.md#default_view_definer))
Если представление подключается с помощью ключевого слова `ATTACH` и настройки SQL безопасности не были заданы,
то по умолчанию будет использоваться `SQL SECURITY NONE` для материализованных представлений и `SQL SECURITY INVOKER` для обычных.
Изменить параметры SQL безопасности возможно с помощью следующего запроса:
```sql
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```
### Примеры представлений с SQL безопасностью
```sql
CREATE test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```
```sql
CREATE test_view
SQL SECURITY INVOKER
AS SELECT ...
```
## LIVE-представления [экспериментальный функционал] {#live-view}
:::note Важно

View File

@ -61,14 +61,14 @@ sidebar_label: ORDER BY
我们只建议使用 `COLLATE` 对于少量行的最终排序,因为排序与 `COLLATE` 比正常的按字节排序效率低。
## ORDER BY *
## ORDER BY ALL
`ORDER BY *` 对所有选定的列进行升序排序。
`ORDER BY ALL` 对所有选定的列进行升序排序。
示例:
``` sql
SELECT a, b, c FROM t ORDER BY *
SELECT a, b, c FROM t ORDER BY ALL
```
等同于:

View File

@ -103,7 +103,6 @@ namespace
const Flags & getColumnFlags() const { return all_flags_for_target[COLUMN]; }
const Flags & getDictionaryFlags() const { return all_flags_for_target[DICTIONARY]; }
const Flags & getNamedCollectionFlags() const { return all_flags_for_target[NAMED_COLLECTION]; }
const Flags & getUserNameFlags() const { return all_flags_for_target[USER_NAME]; }
const Flags & getAllFlagsGrantableOnGlobalLevel() const { return getAllFlags(); }
const Flags & getAllFlagsGrantableOnGlobalWithParameterLevel() const { return getGlobalWithParameterFlags(); }
const Flags & getAllFlagsGrantableOnDatabaseLevel() const { return all_flags_grantable_on_database_level; }
@ -122,7 +121,6 @@ namespace
COLUMN,
DICTIONARY,
NAMED_COLLECTION,
USER_NAME,
};
struct Node;
@ -302,7 +300,7 @@ namespace
collectAllFlags(child.get());
all_flags_grantable_on_table_level = all_flags_for_target[TABLE] | all_flags_for_target[DICTIONARY] | all_flags_for_target[COLUMN];
all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION] | all_flags_for_target[USER_NAME];
all_flags_grantable_on_global_with_parameter_level = all_flags_for_target[NAMED_COLLECTION];
all_flags_grantable_on_database_level = all_flags_for_target[DATABASE] | all_flags_grantable_on_table_level;
}
@ -353,7 +351,7 @@ namespace
std::unordered_map<std::string_view, Flags> keyword_to_flags_map;
std::vector<Flags> access_type_to_flags_mapping;
Flags all_flags;
Flags all_flags_for_target[static_cast<size_t>(USER_NAME) + 1];
Flags all_flags_for_target[static_cast<size_t>(NAMED_COLLECTION) + 1];
Flags all_flags_grantable_on_database_level;
Flags all_flags_grantable_on_table_level;
Flags all_flags_grantable_on_global_with_parameter_level;
@ -373,11 +371,7 @@ std::unordered_map<AccessFlags::ParameterType, AccessFlags> AccessFlags::splitIn
if (named_collection_flags)
result.emplace(ParameterType::NAMED_COLLECTION, named_collection_flags);
auto user_flags = AccessFlags::allUserNameFlags() & *this;
if (user_flags)
result.emplace(ParameterType::USER_NAME, user_flags);
auto other_flags = (~named_collection_flags & ~user_flags) & *this;
auto other_flags = (~AccessFlags::allNamedCollectionFlags()) & *this;
if (other_flags)
result.emplace(ParameterType::NONE, other_flags);
@ -393,9 +387,6 @@ AccessFlags::ParameterType AccessFlags::getParameterType() const
if (AccessFlags::allNamedCollectionFlags().contains(*this))
return AccessFlags::NAMED_COLLECTION;
if (AccessFlags::allUserNameFlags().contains(*this))
return AccessFlags::USER_NAME;
throw Exception(ErrorCodes::MIXED_ACCESS_PARAMETER_TYPES, "Having mixed parameter types: {}", toString());
}
@ -414,7 +405,6 @@ AccessFlags AccessFlags::allTableFlags() { return Helper::instance().getTableFla
AccessFlags AccessFlags::allColumnFlags() { return Helper::instance().getColumnFlags(); }
AccessFlags AccessFlags::allDictionaryFlags() { return Helper::instance().getDictionaryFlags(); }
AccessFlags AccessFlags::allNamedCollectionFlags() { return Helper::instance().getNamedCollectionFlags(); }
AccessFlags AccessFlags::allUserNameFlags() { return Helper::instance().getUserNameFlags(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnGlobalWithParameterLevel() { return Helper::instance().getAllFlagsGrantableOnGlobalWithParameterLevel(); }
AccessFlags AccessFlags::allFlagsGrantableOnDatabaseLevel() { return Helper::instance().getAllFlagsGrantableOnDatabaseLevel(); }

View File

@ -57,7 +57,6 @@ public:
{
NONE,
NAMED_COLLECTION,
USER_NAME,
};
ParameterType getParameterType() const;
std::unordered_map<ParameterType, AccessFlags> splitIntoParameterTypes() const;
@ -104,9 +103,6 @@ public:
/// Returns all the flags related to a named collection.
static AccessFlags allNamedCollectionFlags();
/// Returns all the flags related to a user.
static AccessFlags allUserNameFlags();
/// Returns all the flags which could be granted on the global level.
/// The same as allFlags().
static AccessFlags allFlagsGrantableOnGlobalLevel();

View File

@ -12,7 +12,7 @@ enum class AccessType
/// Macro M should be defined as M(name, aliases, node_type, parent_group_name)
/// where name is identifier with underscores (instead of spaces);
/// aliases is a string containing comma-separated list;
/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/USER_NAME/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
/// node_type either specifies access type's level (GLOBAL/NAMED_COLLECTION/DATABASE/TABLE/DICTIONARY/VIEW/COLUMNS),
/// or specifies that the access type is a GROUP of other access types;
/// parent_group_name is the name of the group containing this access type (or NONE if there is no such group).
/// NOTE A parent group must be declared AFTER all its children.
@ -83,7 +83,6 @@ enum class AccessType
M(ALTER_VIEW_REFRESH, "ALTER LIVE VIEW REFRESH, REFRESH VIEW", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_QUERY, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_REFRESH, "ALTER TABLE MODIFY QUERY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW_MODIFY_SQL_SECURITY, "ALTER TABLE MODIFY SQL SECURITY", VIEW, ALTER_VIEW) \
M(ALTER_VIEW, "", GROUP, ALTER) /* allows to execute ALTER VIEW REFRESH, ALTER VIEW MODIFY QUERY, ALTER VIEW MODIFY REFRESH;
implicitly enabled by the grant ALTER_TABLE */\
\
@ -140,7 +139,6 @@ enum class AccessType
M(CREATE_SETTINGS_PROFILE, "CREATE PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(ALTER_SETTINGS_PROFILE, "ALTER PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(DROP_SETTINGS_PROFILE, "DROP PROFILE", GLOBAL, ACCESS_MANAGEMENT) \
M(ALLOW_SQL_SECURITY_NONE, "CREATE SQL SECURITY NONE, ALLOW SQL SECURITY NONE, SQL SECURITY NONE, SECURITY NONE", GLOBAL, ACCESS_MANAGEMENT) \
M(SHOW_USERS, "SHOW CREATE USER", GLOBAL, SHOW_ACCESS) \
M(SHOW_ROLES, "SHOW CREATE ROLE", GLOBAL, SHOW_ACCESS) \
M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
@ -152,7 +150,6 @@ enum class AccessType
M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
M(NAMED_COLLECTION, "NAMED COLLECTION USAGE, USE NAMED COLLECTION", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
M(SET_DEFINER, "", USER_NAME, ALL) \
\
M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \

View File

@ -1,9 +0,0 @@
#pragma once
#include <Core/Types.h>
enum class SQLSecurityType
{
INVOKER,
DEFINER,
NONE,
};

View File

@ -53,8 +53,7 @@ TEST(AccessRights, Union)
"SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
"SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, "
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, "
"GRANT SET DEFINER ON db1, GRANT NAMED COLLECTION ADMIN ON db1");
"SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1");
}

View File

@ -80,6 +80,8 @@
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/Identifier.h>
#include <boost/algorithm/string.hpp>
namespace ProfileEvents
{
extern const Event ScalarSubqueriesGlobalCacheHit;
@ -120,6 +122,7 @@ namespace ErrorCodes
extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH;
extern const int FUNCTION_CANNOT_HAVE_PARAMETERS;
extern const int SYNTAX_ERROR;
extern const int UNEXPECTED_EXPRESSION;
extern const int INVALID_IDENTIFIER;
}
@ -1214,7 +1217,7 @@ private:
static void expandGroupByAll(QueryNode & query_tree_node_typed);
void expandOrderByAll(QueryNode & query_tree_node_typed);
void expandOrderByAll(QueryNode & query_tree_node_typed, const Settings & settings);
static std::string
rewriteAggregateFunctionNameIfNeeded(const std::string & aggregate_function_name, NullsAction action, const ContextPtr & context);
@ -2366,9 +2369,9 @@ void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
query_tree_node_typed.setIsGroupByAll(false);
}
void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed)
void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed, const Settings & settings)
{
if (!query_tree_node_typed.isOrderByAll())
if (!settings.enable_order_by_all || !query_tree_node_typed.isOrderByAll())
return;
auto * all_node = query_tree_node_typed.getOrderBy().getNodes()[0]->as<SortNode>();
@ -2381,6 +2384,13 @@ void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed)
for (auto & node : projection_nodes)
{
/// Detect and reject ambiguous statements:
/// E.g. for a table with columns "all", "a", "b":
/// - SELECT all, a, b ORDER BY all; -- should we sort by all columns in SELECT or by column "all"?
/// - SELECT a, b AS all ORDER BY all; -- like before but "all" as alias
/// - SELECT func(...) AS all ORDER BY all; -- like before but "all" as function
/// - SELECT a, b ORDER BY all; -- tricky in other way: does the user want to sort by columns in SELECT clause or by not SELECTed column "all"?
auto resolved_expression_it = resolved_expressions.find(node);
if (resolved_expression_it != resolved_expressions.end())
{
@ -2389,6 +2399,9 @@ void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expression nodes list expected 1 projection names. Actual {}",
projection_names.size());
if (boost::iequals(projection_names[0], "all"))
throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
"Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
}
auto sort_node = std::make_shared<SortNode>(node, all_node->getSortDirection(), all_node->getNullsSortDirection());
@ -5118,6 +5131,15 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
true /*allow_lambda_expression*/,
allow_table_expressions /*allow_table_expression*/);
if (function_node_ptr->toAST()->hasSecretParts())
{
for (auto & argument : arguments_projection_names)
{
SipHash hash;
hash.update(argument);
argument = getHexUIntLowercase(hash.get128());
}
}
auto & function_node = *function_node_ptr;
/// Replace right IN function argument if it is table or table function with subquery that read ordinary columns
@ -7555,7 +7577,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
if (settings.enable_positional_arguments)
replaceNodesWithPositionalArguments(query_node_typed.getOrderByNode(), query_node_typed.getProjection().getNodes(), scope);
expandOrderByAll(query_node_typed);
expandOrderByAll(query_node_typed, settings);
resolveSortNodeList(query_node_typed.getOrderByNode(), scope);
}

View File

@ -219,13 +219,13 @@ public:
is_group_by_all = is_group_by_all_value;
}
/// Returns true, if query node has ORDER BY * modifier, false otherwise
/// Returns true, if query node has ORDER BY ALL modifier, false otherwise
bool isOrderByAll() const
{
return is_order_by_all;
}
/// Set query node ORDER BY * modifier value
/// Set query node ORDER BY ALL modifier value
void setIsOrderByAll(bool is_order_by_all_value)
{
is_order_by_all = is_order_by_all_value;

View File

@ -1,4 +1,3 @@
#include <cstddef>
#include <Core/NamesAndTypes.h>
#include <base/sort.h>
@ -12,6 +11,8 @@
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <boost/algorithm/string.hpp>
#include <cstddef>
namespace DB
{
@ -218,6 +219,16 @@ bool NamesAndTypesList::contains(const String & name) const
return false;
}
bool NamesAndTypesList::containsCaseInsensitive(const String & name) const
{
for (const NameAndTypePair & column : *this)
{
if (boost::iequals(column.name, name))
return true;
}
return false;
}
std::optional<NameAndTypePair> NamesAndTypesList::tryGetByName(const std::string & name) const
{
for (const NameAndTypePair & column : *this)

View File

@ -114,8 +114,9 @@ public:
/// Unlike `filter`, returns columns in the order in which they go in `names`.
NamesAndTypesList addTypes(const Names & names) const;
/// Check that column contains in list
/// Check if `name` is one of the column names
bool contains(const String & name) const;
bool containsCaseInsensitive(const String & name) const;
/// Try to get column by name, returns empty optional if column not found
std::optional<NameAndTypePair> tryGetByName(const std::string & name) const;

View File

@ -868,13 +868,11 @@ class IColumn;
M(Bool, print_pretty_type_names, true, "Print pretty type names in DESCRIBE query and toTypeName() function", 0) \
M(Bool, create_table_empty_primary_key_by_default, false, "Allow to create *MergeTree tables with empty primary key when ORDER BY and PRIMARY KEY not specified", 0) \
M(Bool, allow_named_collection_override_by_default, true, "Allow named collections' fields override by default.", 0)\
M(SQLSecurityType, default_normal_view_sql_security, SQLSecurityType::INVOKER, "Allows to set a default value for SQL SECURITY option when creating a normal view.", 0) \
M(SQLSecurityType, default_materialized_view_sql_security, SQLSecurityType::DEFINER, "Allows to set a default value for SQL SECURITY option when creating a materialized view.", 0) \
M(String, default_view_definer, "CURRENT_USER", "Allows to set a default value for DEFINER option when creating view.", 0) \
M(Bool, allow_experimental_shared_merge_tree, false, "Only available in ClickHouse Cloud", 0) \
M(UInt64, cache_warmer_threads, 4, "Only available in ClickHouse Cloud", 0) \
M(Int64, ignore_cold_parts_seconds, 0, "Only available in ClickHouse Cloud", 0) \
M(Int64, prefer_warmed_unmerged_parts_seconds, 0, "Only available in ClickHouse Cloud", 0) \
M(Bool, enable_order_by_all, true, "Enable sorting expression ORDER BY ALL.", 0) \
M(Bool, iceberg_engine_ignore_schema_evolution, false, "Ignore schema evolution in Iceberg table engine and read all data using latest schema saved on table creation. Note that it can lead to incorrect result", 0) \
// End of COMMON_SETTINGS
@ -942,7 +940,6 @@ class IColumn;
MAKE_OBSOLETE(M, Bool, allow_experimental_undrop_table_query, true) \
MAKE_OBSOLETE(M, Bool, allow_experimental_s3queue, true) \
MAKE_OBSOLETE(M, Bool, query_plan_optimize_primary_key, true) \
MAKE_OBSOLETE(M, Bool, enable_order_by_all, true) \
/** The section above is for obsolete settings. Do not add anything there. */

View File

@ -103,10 +103,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"min_external_table_block_size_bytes", DEFAULT_INSERT_BLOCK_SIZE * 256, DEFAULT_INSERT_BLOCK_SIZE * 256, "Squash blocks passed to external table to specified size in bytes, if blocks are not big enough."},
{"parallel_replicas_prefer_local_join", true, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN."},
{"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."},
{"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"},
{"default_view_definer", "", "CURRENT_USER", "Allows to set default `DEFINER` option while creating a view"},
{"default_materialized_view_sql_security", "INVOKER", "DEFINER", "Allows to set a default value for SQL SECURITY option when creating a materialized view"},
{"default_normal_view_sql_security", "INVOKER", "INVOKER", "Allows to set default `SQL SECURITY` option while creating a normal view"}}},
}},
{"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
{"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
{"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},

View File

@ -1,6 +1,5 @@
#include <Core/SettingsEnums.h>
#include <magic_enum.hpp>
#include <Access/Common/SQLSecurityDefs.h>
namespace DB
@ -207,9 +206,4 @@ IMPLEMENT_SETTING_ENUM(DateTimeOverflowBehavior, ErrorCodes::BAD_ARGUMENTS,
{{"throw", FormatSettings::DateTimeOverflowBehavior::Throw},
{"ignore", FormatSettings::DateTimeOverflowBehavior::Ignore},
{"saturate", FormatSettings::DateTimeOverflowBehavior::Saturate}})
IMPLEMENT_SETTING_ENUM(SQLSecurityType, ErrorCodes::BAD_ARGUMENTS,
{{"DEFINER", SQLSecurityType::DEFINER},
{"INVOKER", SQLSecurityType::INVOKER},
{"NONE", SQLSecurityType::NONE}})
}

View File

@ -6,7 +6,6 @@
#include <Formats/FormatSettings.h>
#include <IO/ReadSettings.h>
#include <Common/ShellCommandSettings.h>
#include <Parsers/ASTSQLSecurity.h>
namespace DB
@ -267,5 +266,4 @@ DECLARE_SETTING_ENUM(SchemaInferenceMode)
DECLARE_SETTING_ENUM_WITH_RENAME(DateTimeOverflowBehavior, FormatSettings::DateTimeOverflowBehavior)
DECLARE_SETTING_ENUM(SQLSecurityType)
}

View File

@ -152,8 +152,17 @@ private:
auto * archive = archive_read_new();
try
{
archive_read_support_filter_all(archive);
archive_read_support_format_all(archive);
// Support for bzip2, gzip, lzip, xz, zstd and lz4
archive_read_support_filter_bzip2(archive);
archive_read_support_filter_gzip(archive);
archive_read_support_filter_xz(archive);
archive_read_support_filter_lz4(archive);
archive_read_support_filter_zstd(archive);
// Support tar, 7zip and zip
archive_read_support_format_tar(archive);
archive_read_support_format_7zip(archive);
archive_read_support_format_zip(archive);
if (archive_read_open_filename(archive, path_to_archive.c_str(), 10240) != ARCHIVE_OK)
throw Exception(ErrorCodes::CANNOT_UNPACK_ARCHIVE, "Couldn't open archive {}: {}", quoteString(path_to_archive), archive_error_string(archive));
}

View File

@ -715,7 +715,7 @@ std::string Client::getRegionForBucket(const std::string & bucket, bool force_de
if (outcome.IsSuccess())
{
const auto & result = outcome.GetResult();
region = result.GetRegion();
region = result.GetBucketRegion();
}
else
{

View File

@ -261,7 +261,7 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const
InterpreterInsertQuery interpreter(query, query_context, query_context->getSettingsRef().insert_allow_materialized_columns);
auto table = interpreter.getTable(insert_query);
auto sample_block = interpreter.getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);
auto sample_block = interpreter.getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr());
if (!FormatFactory::instance().isInputFormat(insert_query.format))
throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Unknown input format {}", insert_query.format);

View File

@ -794,7 +794,6 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
{
auto res = std::shared_ptr<Context>(new Context);
res->shared = shared_part;
res->query_access_info = std::make_shared<QueryAccessInfo>();
return res;
}
@ -814,9 +813,7 @@ SharedContextHolder Context::createShared()
ContextMutablePtr Context::createCopy(const ContextPtr & other)
{
SharedLockGuard lock(other->mutex);
auto new_context = std::shared_ptr<Context>(new Context(*other));
new_context->query_access_info = std::make_shared<QueryAccessInfo>(*other->query_access_info);
return new_context;
return std::shared_ptr<Context>(new Context(*other));
}
ContextMutablePtr Context::createCopy(const ContextWeakPtr & other)
@ -1610,12 +1607,12 @@ void Context::addQueryAccessInfo(
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard lock(query_access_info->mutex);
query_access_info->databases.emplace(quoted_database_name);
query_access_info->tables.emplace(full_quoted_table_name);
std::lock_guard lock(query_access_info.mutex);
query_access_info.databases.emplace(quoted_database_name);
query_access_info.tables.emplace(full_quoted_table_name);
for (const auto & column_name : column_names)
query_access_info->columns.emplace(full_quoted_table_name + "." + backQuoteIfNeed(column_name));
query_access_info.columns.emplace(full_quoted_table_name + "." + backQuoteIfNeed(column_name));
}
void Context::addQueryAccessInfo(const Names & partition_names)
@ -1623,9 +1620,9 @@ void Context::addQueryAccessInfo(const Names & partition_names)
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info->mutex);
std::lock_guard<std::mutex> lock(query_access_info.mutex);
for (const auto & partition_name : partition_names)
query_access_info->partitions.emplace(partition_name);
query_access_info.partitions.emplace(partition_name);
}
void Context::addViewAccessInfo(const String & view_name)
@ -1633,8 +1630,8 @@ void Context::addViewAccessInfo(const String & view_name)
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info->mutex);
query_access_info->views.emplace(view_name);
std::lock_guard<std::mutex> lock(query_access_info.mutex);
query_access_info.views.emplace(view_name);
}
void Context::addQueryAccessInfo(const QualifiedProjectionName & qualified_projection_name)
@ -1645,8 +1642,8 @@ void Context::addQueryAccessInfo(const QualifiedProjectionName & qualified_proje
if (isGlobalContext())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have query access info");
std::lock_guard<std::mutex> lock(query_access_info->mutex);
query_access_info->projections.emplace(fmt::format(
std::lock_guard<std::mutex> lock(query_access_info.mutex);
query_access_info.projections.emplace(fmt::format(
"{}.{}", qualified_projection_name.storage_id.getFullTableName(), backQuoteIfNeed(qualified_projection_name.projection_name)));
}
@ -2297,8 +2294,7 @@ void Context::setMacros(std::unique_ptr<Macros> && macros)
ContextMutablePtr Context::getQueryContext() const
{
auto ptr = query_context.lock();
if (!ptr)
throw Exception(ErrorCodes::THERE_IS_NO_QUERY, "There is no query or query context has expired");
if (!ptr) throw Exception(ErrorCodes::THERE_IS_NO_QUERY, "There is no query or query context has expired");
return ptr;
}

View File

@ -350,11 +350,8 @@ protected:
std::set<std::string> projections{};
std::set<std::string> views{};
};
using QueryAccessInfoPtr = std::shared_ptr<QueryAccessInfo>;
/// In some situations, we want to be able to transfer the access info from children back to parents (e.g. definers context).
/// Therefore, query_access_info must be a pointer.
QueryAccessInfoPtr query_access_info;
QueryAccessInfo query_access_info;
/// Record names of created objects of factories (for testing, etc)
struct QueryFactoriesInfo
@ -679,9 +676,7 @@ public:
const Block * tryGetSpecialScalar(const String & name) const;
void addSpecialScalar(const String & name, const Block & block);
const QueryAccessInfo & getQueryAccessInfo() const { return *getQueryAccessInfoPtr(); }
const QueryAccessInfoPtr getQueryAccessInfoPtr() const { return query_access_info; }
void setQueryAccessInfo(QueryAccessInfoPtr other) { query_access_info = other; }
const QueryAccessInfo & getQueryAccessInfo() const { return query_access_info; }
void addQueryAccessInfo(
const String & quoted_database_name,

View File

@ -8,7 +8,6 @@
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/MutationsInterpreter.h>
#include <Interpreters/MutationsNonDeterministicHelpers.h>
#include <Interpreters/QueryLog.h>
@ -72,13 +71,6 @@ BlockIO InterpreterAlterQuery::execute()
BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter)
{
for (auto & child : alter.command_list->children)
{
auto * command_ast = child->as<ASTAlterCommand>();
if (command_ast->sql_security)
InterpreterCreateQuery::processSQLSecurityOption(getContext(), command_ast->sql_security->as<ASTSQLSecurity &>());
}
BlockIO res;
if (!UserDefinedSQLFunctionFactory::instance().empty())
@ -496,11 +488,6 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccessForCommand(const AS
required_access.emplace_back(AccessType::ALTER_MODIFY_COMMENT, database, table);
break;
}
case ASTAlterCommand::MODIFY_SQL_SECURITY:
{
required_access.emplace_back(AccessType::ALTER_VIEW_MODIFY_SQL_SECURITY, database, table);
break;
}
}
return required_access;

View File

@ -2,9 +2,6 @@
#include <filesystem>
#include <Access/AccessControl.h>
#include <Access/User.h>
#include "Common/Exception.h"
#include <Common/StringUtils/StringUtils.h>
#include <Common/escapeForFileName.h>
@ -1097,8 +1094,6 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
String current_database = getContext()->getCurrentDatabase();
auto database_name = create.database ? create.getDatabase() : current_database;
if (create.sql_security)
processSQLSecurityOption(getContext(), create.sql_security->as<ASTSQLSecurity &>(), create.attach, create.is_materialized_view);
DDLGuardPtr ddl_guard;
@ -1888,61 +1883,6 @@ void InterpreterCreateQuery::addColumnsDescriptionToCreateQueryIfNecessary(ASTCr
}
}
void InterpreterCreateQuery::processSQLSecurityOption(ContextPtr context_, ASTSQLSecurity & sql_security, bool is_attach, bool is_materialized_view)
{
/// If no SQL security is specified, apply default from default_*_view_sql_security setting.
if (!sql_security.type.has_value())
{
SQLSecurityType default_security;
if (is_materialized_view)
default_security = context_->getSettingsRef().default_materialized_view_sql_security;
else
default_security = context_->getSettingsRef().default_normal_view_sql_security;
if (default_security == SQLSecurityType::DEFINER)
{
String default_definer = context_->getSettingsRef().default_view_definer;
if (default_definer == "CURRENT_USER")
sql_security.is_definer_current_user = true;
else
sql_security.definer = std::make_shared<ASTUserNameWithHost>(default_definer);
}
sql_security.type = default_security;
}
/// Resolves `DEFINER = CURRENT_USER`. Can change the SQL security type if we try to resolve the user during the attachment.
const auto current_user_name = context_->getUserName();
if (sql_security.is_definer_current_user)
{
if (current_user_name.empty())
/// This can happen only when attaching a view for the first time after migration and with `CURRENT_USER` default.
if (is_materialized_view)
sql_security.type = SQLSecurityType::NONE;
else
sql_security.type = SQLSecurityType::INVOKER;
else if (sql_security.definer)
sql_security.definer->replace(current_user_name);
else
sql_security.definer = std::make_shared<ASTUserNameWithHost>(current_user_name);
}
/// Checks the permissions for the specified definer user.
if (sql_security.definer && !sql_security.is_definer_current_user && !is_attach)
{
const auto definer_name = sql_security.definer->toString();
/// Validate that the user exists.
context_->getAccessControl().getID<User>(definer_name);
if (definer_name != current_user_name)
context_->checkAccess(AccessType::SET_DEFINER, definer_name);
}
if (sql_security.type == SQLSecurityType::NONE && !is_attach)
context_->checkAccess(AccessType::ALLOW_SQL_SECURITY_NONE);
}
void registerInterpreterCreateQuery(InterpreterFactory & factory)
{
auto create_fn = [] (const InterpreterFactory::Arguments & args)

View File

@ -80,9 +80,6 @@ public:
void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr) const override;
/// Check access right, validate definer statement and replace `CURRENT USER` with actual name.
static void processSQLSecurityOption(ContextPtr context_, ASTSQLSecurity & sql_security, bool is_attach = false, bool is_materialized_view = false);
private:
struct TableProperties
{

View File

@ -125,10 +125,7 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
Block InterpreterInsertQuery::getSampleBlock(
const ASTInsertQuery & query,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot,
ContextPtr context_,
bool no_destination,
bool allow_materialized)
const StorageMetadataPtr & metadata_snapshot) const
{
/// If the query does not include information about columns
if (!query.columns)
@ -142,7 +139,7 @@ Block InterpreterInsertQuery::getSampleBlock(
}
/// Form the block based on the column names from the query
const auto columns_ast = processColumnTransformers(context_->getCurrentDatabase(), table, metadata_snapshot, query.columns);
const auto columns_ast = processColumnTransformers(getContext()->getCurrentDatabase(), table, metadata_snapshot, query.columns);
Names names;
names.reserve(columns_ast->children.size());
for (const auto & identifier : columns_ast->children)
@ -151,7 +148,7 @@ Block InterpreterInsertQuery::getSampleBlock(
names.emplace_back(std::move(current_name));
}
return getSampleBlock(names, table, metadata_snapshot, allow_materialized);
return getSampleBlock(names, table, metadata_snapshot);
}
std::optional<Names> InterpreterInsertQuery::getInsertColumnNames() const
@ -176,8 +173,7 @@ std::optional<Names> InterpreterInsertQuery::getInsertColumnNames() const
Block InterpreterInsertQuery::getSampleBlock(
const Names & names,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot,
bool allow_materialized)
const StorageMetadataPtr & metadata_snapshot) const
{
Block table_sample_physical = metadata_snapshot->getSampleBlock();
Block table_sample_insertable = metadata_snapshot->getSampleBlockInsertable();
@ -264,8 +260,7 @@ Chain InterpreterInsertQuery::buildChain(
const StorageMetadataPtr & metadata_snapshot,
const Names & columns,
ThreadStatusesHolderPtr thread_status_holder,
std::atomic_uint64_t * elapsed_counter_ms,
bool check_access)
std::atomic_uint64_t * elapsed_counter_ms)
{
ProfileEvents::increment(ProfileEvents::InsertQueriesWithSubqueries);
ProfileEvents::increment(ProfileEvents::QueriesWithSubqueries);
@ -276,9 +271,7 @@ Chain InterpreterInsertQuery::buildChain(
if (!running_group)
running_group = std::make_shared<ThreadGroup>(getContext());
auto sample = getSampleBlock(columns, table, metadata_snapshot, allow_materialized);
if (check_access)
getContext()->checkAccess(AccessType::INSERT, table->getStorageID(), sample.getNames());
auto sample = getSampleBlock(columns, table, metadata_snapshot);
Chain sink = buildSink(table, metadata_snapshot, thread_status_holder, running_group, elapsed_counter_ms);
Chain chain = buildPreSinkChain(sink.getInputHeader(), table, metadata_snapshot, sample);
@ -404,7 +397,7 @@ BlockIO InterpreterInsertQuery::execute()
auto table_lock = table->lockForShare(getContext()->getInitialQueryId(), settings.lock_acquire_timeout);
auto metadata_snapshot = table->getInMemoryMetadataPtr();
auto query_sample_block = getSampleBlock(query, table, metadata_snapshot, getContext(), no_destination, allow_materialized);
auto query_sample_block = getSampleBlock(query, table, metadata_snapshot);
/// For table functions we check access while executing
/// getTable() -> ITableFunction::execute().

View File

@ -46,21 +46,14 @@ public:
const StorageMetadataPtr & metadata_snapshot,
const Names & columns,
ThreadStatusesHolderPtr thread_status_holder = {},
std::atomic_uint64_t * elapsed_counter_ms = nullptr,
bool check_access = false);
std::atomic_uint64_t * elapsed_counter_ms = nullptr);
static void extendQueryLogElemImpl(QueryLogElement & elem, ContextPtr context_);
void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr context_) const override;
StoragePtr getTable(ASTInsertQuery & query);
static Block getSampleBlock(
const ASTInsertQuery & query,
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot,
ContextPtr context_,
bool no_destination = false,
bool allow_materialized = false);
Block getSampleBlock(const ASTInsertQuery & query, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot) const;
bool supportsTransactions() const override { return true; }
@ -69,7 +62,7 @@ public:
bool shouldAddSquashingFroStorage(const StoragePtr & table) const;
private:
static Block getSampleBlock(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot, bool allow_materialized);
Block getSampleBlock(const Names & names, const StoragePtr & table, const StorageMetadataPtr & metadata_snapshot) const;
ASTPtr query_ptr;
const bool allow_materialized;

View File

@ -843,7 +843,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
if (query.prewhere() && !query.where())
analysis_result.prewhere_info->need_filter = true;
if (table_id && got_storage_from_query && !joined_tables.isLeftTableFunction() && !options.ignore_access_check)
if (table_id && got_storage_from_query && !joined_tables.isLeftTableFunction())
{
/// The current user should have the SELECT privilege. If this table_id is for a table
/// function we don't check access rights here because in this case they have been already

View File

@ -46,10 +46,6 @@ struct SelectQueryOptions
/// Bypass setting constraints for some internal queries such as projection ASTs.
bool ignore_setting_constraints = false;
/// Bypass access check for select query.
/// This allows to skip double access check in some specific cases (e.g. insert into table with materialized view)
bool ignore_access_check = false;
/// These two fields are used to evaluate shardNum() and shardCount() function when
/// prefer_localhost_replica == 1 and local instance is selected. They are needed because local
/// instance might have multiple shards and scalars can only hold one value.
@ -133,12 +129,6 @@ struct SelectQueryOptions
return *this;
}
SelectQueryOptions & ignoreAccessCheck(bool value = true)
{
ignore_access_check = value;
return *this;
}
SelectQueryOptions & setInternal(bool value = false)
{
is_internal = value;

View File

@ -60,6 +60,8 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <boost/algorithm/string.hpp>
namespace DB
{
@ -73,6 +75,7 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int UNKNOWN_IDENTIFIER;
extern const int UNEXPECTED_EXPRESSION;
}
namespace
@ -775,7 +778,7 @@ void expandGroupByAll(ASTSelectQuery * select_query)
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, group_expression_list);
}
void expandOrderByAll(ASTSelectQuery * select_query)
void expandOrderByAll(ASTSelectQuery * select_query, [[maybe_unused]] const TablesWithColumns & tables_with_columns)
{
auto * all_elem = select_query->orderBy()->children[0]->as<ASTOrderByElement>();
if (!all_elem)
@ -785,6 +788,32 @@ void expandOrderByAll(ASTSelectQuery * select_query)
for (const auto & expr : select_query->select()->children)
{
/// Detect and reject ambiguous statements:
/// E.g. for a table with columns "all", "a", "b":
/// - SELECT all, a, b ORDER BY all; -- should we sort by all columns in SELECT or by column "all"?
/// - SELECT a, b AS all ORDER BY all; -- like before but "all" as alias
/// - SELECT func(...) AS all ORDER BY all; -- like before but "all" as function
/// - SELECT a, b ORDER BY all; -- tricky in other way: does the user want to sort by columns in SELECT clause or by not SELECTed column "all"?
static const String all = "all";
if (auto * identifier = expr->as<ASTIdentifier>(); identifier != nullptr)
if (boost::iequals(identifier->name(), all) || boost::iequals(identifier->alias, all))
throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
"Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
if (auto * function = expr->as<ASTFunction>(); function != nullptr)
if (boost::iequals(function->alias, all))
throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
"Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
for (const auto & table_with_columns : tables_with_columns)
{
const auto & columns = table_with_columns.columns;
if (columns.containsCaseInsensitive(all))
throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
"Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
}
auto elem = std::make_shared<ASTOrderByElement>();
elem->direction = all_elem->direction;
elem->nulls_direction = all_elem->nulls_direction;
@ -1311,9 +1340,9 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
if (select_query->group_by_all)
expandGroupByAll(select_query);
// expand ORDER BY *
if (select_query->order_by_all)
expandOrderByAll(select_query);
// expand ORDER BY ALL
if (settings.enable_order_by_all && select_query->order_by_all)
expandOrderByAll(select_query, tables_with_columns);
/// Remove unneeded columns according to 'required_result_columns'.
/// Leave all selected columns in case of DISTINCT; columns that contain arrayJoin function inside.

View File

@ -103,7 +103,6 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
extern const int QUERY_WAS_CANCELLED;
extern const int INCORRECT_DATA;
extern const int SUPPORT_IS_DISABLED;
}
namespace FailPoints
@ -709,7 +708,9 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
{
if (settings.dialect == Dialect::kusto && !internal)
{
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Kusto dialect is disabled until these two bugs will be fixed: https://github.com/ClickHouse/ClickHouse/issues/59037 and https://github.com/ClickHouse/ClickHouse/issues/59036");
ParserKQLStatement parser(end, settings.allow_settings_after_format_in_insert);
/// TODO: parser should fail early when max_query_size limit is reached.
ast = parseKQLQuery(parser, begin, end, "", max_query_size, settings.max_parser_depth);
}
else if (settings.dialect == Dialect::prql && !internal)
{

View File

@ -479,11 +479,6 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState &
settings.ostr << (settings.hilite ? hilite_keyword : "") << " TO ";
rename_to->formatImpl(settings, state, frame);
}
else if (type == ASTAlterCommand::MODIFY_SQL_SECURITY)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "MODIFY " << (settings.hilite ? hilite_none : "");
sql_security->formatImpl(settings, state, frame);
}
else if (type == ASTAlterCommand::APPLY_DELETED_MASK)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "APPLY DELETED MASK" << (settings.hilite ? hilite_none : "");

View File

@ -84,7 +84,6 @@ public:
MODIFY_DATABASE_SETTING,
MODIFY_COMMENT,
MODIFY_SQL_SECURITY,
};
Type type = NO_TYPE;
@ -167,9 +166,6 @@ public:
/// For MODIFY_QUERY
IAST * select = nullptr;
/// For MODIFY_SQL_SECURITY
IAST * sql_security = nullptr;
/// In ALTER CHANNEL, ADD, DROP, SUSPEND, RESUME, REFRESH, MODIFY queries, the list of live views is stored here
IAST * values = nullptr;

View File

@ -12,37 +12,6 @@
namespace DB
{
void ASTSQLSecurity::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
if (!type.has_value())
return;
if (definer || is_definer_current_user)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "DEFINER" << (settings.hilite ? hilite_none : "");
settings.ostr << " = ";
if (definer)
definer->formatImpl(settings, state, frame);
else
settings.ostr << "CURRENT_USER";
settings.ostr << " ";
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SQL SECURITY" << (settings.hilite ? hilite_none : "");
switch (*type)
{
case SQLSecurityType::INVOKER:
settings.ostr << " INVOKER";
break;
case SQLSecurityType::DEFINER:
settings.ostr << " DEFINER";
break;
case SQLSecurityType::NONE:
settings.ostr << " NONE";
break;
}
}
ASTPtr ASTStorage::clone() const
{
auto res = std::make_shared<ASTStorage>(*this);
@ -323,9 +292,10 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
else if (is_window_view)
what = "WINDOW VIEW";
settings.ostr << (settings.hilite ? hilite_keyword : "") << action << (settings.hilite ? hilite_none : "");
settings.ostr << " ";
settings.ostr << (settings.hilite ? hilite_keyword : "") << (temporary ? "TEMPORARY " : "")
settings.ostr
<< (settings.hilite ? hilite_keyword : "")
<< action << " "
<< (temporary ? "TEMPORARY " : "")
<< what << " "
<< (if_not_exists ? "IF NOT EXISTS " : "")
<< (settings.hilite ? hilite_none : "")
@ -474,16 +444,10 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
else if (is_create_empty)
settings.ostr << (settings.hilite ? hilite_keyword : "") << " EMPTY" << (settings.hilite ? hilite_none : "");
if (sql_security && sql_security->as<ASTSQLSecurity &>().type.has_value())
{
settings.ostr << settings.nl_or_ws;
sql_security->formatImpl(settings, state, frame);
}
if (select)
{
settings.ostr << settings.nl_or_ws;
settings.ostr << (settings.hilite ? hilite_keyword : "") << "AS "
settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS"
<< settings.nl_or_ws
<< (comment ? "(" : "") << (settings.hilite ? hilite_none : "");
select->formatImpl(settings, state, frame);
settings.ostr << (settings.hilite ? hilite_keyword : "") << (comment ? ")" : "") << (settings.hilite ? hilite_none : "");

View File

@ -5,7 +5,6 @@
#include <Parsers/ASTDictionary.h>
#include <Parsers/ASTDictionaryAttributeDeclaration.h>
#include <Parsers/ASTTableOverrides.h>
#include <Parsers/ASTSQLSecurity.h>
#include <Parsers/ASTRefreshStrategy.h>
#include <Interpreters/StorageID.h>
@ -16,7 +15,6 @@ class ASTFunction;
class ASTSetQuery;
class ASTSelectWithUnionQuery;
class ASTStorage : public IAST
{
public:
@ -113,7 +111,6 @@ public:
IAST * as_table_function = nullptr;
ASTSelectWithUnionQuery * select = nullptr;
IAST * comment = nullptr;
ASTPtr sql_security = nullptr;
ASTTableOverrideList * table_overrides = nullptr; /// For CREATE DATABASE with engines that automatically create tables

View File

@ -1,39 +0,0 @@
#include <Parsers/ASTSQLSecurity.h>
#include <IO/Operators.h>
namespace DB
{
void ASTSQLSecurity::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
if (!type.has_value())
return;
if (definer || is_definer_current_user)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << "DEFINER" << (settings.hilite ? hilite_none : "");
settings.ostr << " = ";
if (definer)
definer->formatImpl(settings, state, frame);
else
settings.ostr << "CURRENT_USER";
settings.ostr << " ";
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << "SQL SECURITY" << (settings.hilite ? hilite_none : "");
switch (*type)
{
case SQLSecurityType::INVOKER:
settings.ostr << " INVOKER";
break;
case SQLSecurityType::DEFINER:
settings.ostr << " DEFINER";
break;
case SQLSecurityType::NONE:
settings.ostr << " NONE";
break;
}
}
}

View File

@ -1,26 +0,0 @@
#pragma once
#include <Parsers/Access/ASTUserNameWithHost.h>
#include <Access/Common/SQLSecurityDefs.h>
namespace DB
{
/// DEFINER = <user_name | CURRENT_USER> SQL SECURITY <DEFINER | INVOKER | NONE>
/// If type was not set during parsing, the default type from settings will be used.
/// Currently supports only views.
class ASTSQLSecurity : public IAST
{
public:
bool is_definer_current_user{false};
std::shared_ptr<ASTUserNameWithHost> definer = nullptr;
std::optional<SQLSecurityType> type = std::nullopt;
String getID(char) const override { return "View SQL Security"; }
ASTPtr clone() const override { return std::make_shared<ASTSQLSecurity>(*this); }
void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;
};
}

View File

@ -165,7 +165,7 @@ void ASTSelectQuery::formatImpl(const FormatSettings & s, FormatState & state, F
if (order_by_all)
{
s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "ORDER BY *" << (s.hilite ? hilite_none : "");
s.ostr << (s.hilite ? hilite_keyword : "") << s.nl_or_ws << indent_str << "ORDER BY ALL" << (s.hilite ? hilite_none : "");
auto * elem = orderBy()->children[0]->as<ASTOrderByElement>();
s.ostr << (s.hilite ? hilite_keyword : "")

View File

@ -28,12 +28,6 @@ void ASTUserNameWithHost::concatParts()
host_pattern.clear();
}
void ASTUserNameWithHost::replace(const String name_)
{
base_name = name_;
host_pattern.clear();
}
void ASTUserNamesWithHost::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
{

View File

@ -27,7 +27,6 @@ public:
String getID(char) const override { return "UserNameWithHost"; }
ASTPtr clone() const override { return std::make_shared<ASTUserNameWithHost>(*this); }
void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
void replace(const String name_);
};

View File

@ -73,7 +73,7 @@ bool IParserKQLFunction::directMapping(
int argument_count = 0;
const auto begin = pos;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (pos != begin)
out.append(", ");
@ -148,11 +148,11 @@ String IParserKQLFunction::getConvertedArgument(const String & fn_name, IParser:
if (pos->type == TokenType::ClosingRoundBracket || pos->type == TokenType::ClosingSquareBracket)
return {};
if (pos->isEnd() || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
if (!isValidKQLPos(pos) || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Need more argument(s) in function: {}", fn_name);
std::vector<String> tokens;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (pos->type == TokenType::OpeningRoundBracket)
++round_bracket_count;
@ -191,7 +191,7 @@ String IParserKQLFunction::getConvertedArgument(const String & fn_name, IParser:
{
++pos;
String array_index;
while (!pos->isEnd() && pos->type != TokenType::ClosingSquareBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::ClosingSquareBracket)
{
array_index += getExpression(pos);
++pos;
@ -246,7 +246,7 @@ IParserKQLFunction::getOptionalArgument(const String & function_name, DB::IParse
const auto * begin = pos->begin;
std::stack<DB::TokenType> scopes;
while (!pos->isEnd() && (!scopes.empty() || (pos->type != DB::TokenType::Comma && pos->type != DB::TokenType::ClosingRoundBracket)))
while (isValidKQLPos(pos) && (!scopes.empty() || (pos->type != DB::TokenType::Comma && pos->type != DB::TokenType::ClosingRoundBracket)))
{
const auto token_type = pos->type;
if (isOpeningBracket(token_type))
@ -357,7 +357,7 @@ String IParserKQLFunction::getExpression(IParser::Pos & pos)
{
++pos;
String array_index;
while (!pos->isEnd() && pos->type != TokenType::ClosingSquareBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::ClosingSquareBracket)
{
array_index += getExpression(pos);
++pos;

View File

@ -7,6 +7,7 @@
#include <Parsers/Kusto/ParserKQLDateTypeTimespan.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLStatement.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSetQuery.h>
#include "Poco/String.h"
#include <format>
@ -51,7 +52,7 @@ bool DatatypeDatetime::convertImpl(String & out, IParser::Pos & pos)
else
{
auto start = pos;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
++pos;
if (pos->type == TokenType::ClosingRoundBracket)
@ -77,7 +78,7 @@ bool DatatypeDynamic::convertImpl(String & out, IParser::Pos & pos)
if (pos->type == TokenType::OpeningCurlyBrace)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Property bags are not supported for now in {}", function_name);
while (!pos->isEnd() && pos->type != TokenType::ClosingRoundBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::ClosingRoundBracket)
{
if (const auto token_type = pos->type; token_type == TokenType::BareWord || token_type == TokenType::Number
|| token_type == TokenType::QuotedIdentifier || token_type == TokenType::StringLiteral)
@ -117,7 +118,7 @@ bool DatatypeGuid::convertImpl(String & out, IParser::Pos & pos)
else
{
auto start = pos;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
++pos;
if (pos->type == TokenType::ClosingRoundBracket)

View File

@ -14,6 +14,7 @@
#include <Parsers/Kusto/KustoFunctions/KQLTimeSeriesFunctions.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLStatement.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSetQuery.h>
#include "Poco/String.h"
namespace DB::ErrorCodes
@ -521,7 +522,7 @@ bool MakeTimeSpan::convertImpl(String & out, IParser::Pos & pos)
String second;
int arg_count = 0;
std::vector<String> args;
while (!pos->isEnd() && pos->type != TokenType::ClosingRoundBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::ClosingRoundBracket)
{
String arg = getConvertedArgument(fn_name, pos);
args.insert(args.begin(), arg);
@ -588,7 +589,7 @@ bool MakeDateTime::convertImpl(String & out, IParser::Pos & pos)
String arguments;
int arg_count = 0;
while (!pos->isEnd() && pos->type != TokenType::ClosingRoundBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::ClosingRoundBracket)
{
String arg = getConvertedArgument(fn_name, pos);
if (pos->type == TokenType::Comma)

View File

@ -3,7 +3,7 @@
#include <Parsers/Kusto/KustoFunctions/IParserKQLFunction.h>
#include <Parsers/Kusto/KustoFunctions/KQLFunctionFactory.h>
#include <Parsers/Kusto/KustoFunctions/KQLStringFunctions.h>
#include <Parsers/Kusto/Utilities.h>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <Poco/String.h>
@ -590,7 +590,7 @@ bool StrCatDelim::convertImpl(String & out, IParser::Pos & pos)
int arg_count = 0;
String args;
while (!pos->isEnd() && pos->type != TokenType::Semicolon && pos->type != TokenType::ClosingRoundBracket)
while (isValidKQLPos(pos) && pos->type != TokenType::Semicolon && pos->type != TokenType::ClosingRoundBracket)
{
++pos;
String arg = getConvertedArgument(fn_name, pos);

View File

@ -7,6 +7,7 @@
#include <Parsers/Kusto/ParserKQLOperators.h>
#include <Parsers/Kusto/ParserKQLProject.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ParserTablesInSelectQuery.h>
@ -44,7 +45,7 @@ bool ParserKQLExtend ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
int32_t round_bracket_count = 0;
int32_t square_bracket_count = 0;
while (!npos->isEnd())
while (isValidKQLPos(npos))
{
if (npos->type == TokenType::OpeningRoundBracket)
++round_bracket_count;

View File

@ -7,6 +7,7 @@
#include <Parsers/Kusto/ParserKQLMakeSeries.h>
#include <Parsers/Kusto/ParserKQLOperators.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ParserSetQuery.h>
#include <Parsers/ParserTablesInSelectQuery.h>
@ -49,7 +50,7 @@ bool ParserKQLMVExpand::parseColumnArrayExprs(ColumnArrayExprs & column_array_ex
String to_type;
--expr_end_pos;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (pos->type == TokenType::OpeningRoundBracket)
++bracket_count;
@ -125,9 +126,9 @@ bool ParserKQLMVExpand::parseColumnArrayExprs(ColumnArrayExprs & column_array_ex
if (String(pos->begin, pos->end) == "limit")
break;
if (!pos->isEnd())
if (isValidKQLPos(pos))
++pos;
if (pos->isEnd() || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
if (!isValidKQLPos(pos) || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
{
if (expr_end_pos < expr_begin_pos)
{

View File

@ -6,6 +6,7 @@
#include <Parsers/Kusto/ParserKQLMakeSeries.h>
#include <Parsers/Kusto/ParserKQLOperators.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ParserTablesInSelectQuery.h>
@ -39,7 +40,7 @@ bool ParserKQLMakeSeries ::parseAggregationColumns(AggregationColumns & aggregat
ParserToken close_bracket(TokenType::ClosingRoundBracket);
ParserToken comma(TokenType::Comma);
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
String alias;
String aggregation_fun;
@ -96,7 +97,7 @@ bool ParserKQLMakeSeries ::parseFromToStepClause(FromToStepClause & from_to_step
auto step_pos = begin;
auto end_pos = begin;
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (String(pos->begin, pos->end) == "from")
from_pos = pos;
@ -175,7 +176,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr &
Tokens tokens(src.c_str(), src.c_str() + src.size());
IParser::Pos pos(tokens, max_depth);
String res;
while (!pos->isEnd())
while (isValidKQLPos(pos))
{
String tmp = String(pos->begin, pos->end);
if (tmp == "parseDateTime64BestEffortOrNull")
@ -201,7 +202,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr &
std::vector<String> group_expression_tokens;
Tokens tokens(group_expression.c_str(), group_expression.c_str() + group_expression.size());
IParser::Pos pos(tokens, max_depth);
while (!pos->isEnd())
while (isValidKQLPos(pos))
{
if (String(pos->begin, pos->end) == "AS")
{

View File

@ -5,6 +5,7 @@
#include <Parsers/Kusto/ParserKQLOperators.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLStatement.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/formatAST.h>
@ -148,7 +149,7 @@ String KQLOperators::genHasAnyAllOpExpr(std::vector<String> & tokens, IParser::P
String logic_op = (kql_op == "has_all") ? " and " : " or ";
while (!token_pos->isEnd() && token_pos->type != TokenType::PipeMark && token_pos->type != TokenType::Semicolon)
while (isValidKQLPos(token_pos) && token_pos->type != TokenType::PipeMark && token_pos->type != TokenType::Semicolon)
{
auto tmp_arg = IParserKQLFunction::getExpression(token_pos);
if (token_pos->type == TokenType::Comma)
@ -217,7 +218,7 @@ String genInOpExprCis(std::vector<String> & tokens, DB::IParser::Pos & token_pos
--token_pos;
new_expr += ch_op;
while (!token_pos->isEnd() && token_pos->type != DB::TokenType::PipeMark && token_pos->type != DB::TokenType::Semicolon)
while (isValidKQLPos(token_pos) && token_pos->type != DB::TokenType::PipeMark && token_pos->type != DB::TokenType::Semicolon)
{
auto tmp_arg = String(token_pos->begin, token_pos->end);
if (token_pos->type != DB::TokenType::Comma && token_pos->type != DB::TokenType::ClosingRoundBracket
@ -329,7 +330,7 @@ bool KQLOperators::convert(std::vector<String> & tokens, IParser::Pos & pos)
{
auto begin = pos;
if (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
if (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
KQLOperatorValue op_value = KQLOperatorValue::none;
@ -339,14 +340,14 @@ bool KQLOperators::convert(std::vector<String> & tokens, IParser::Pos & pos)
if (token == "!")
{
++pos;
if (pos->isEnd() || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
if (!isValidKQLPos(pos) || pos->type == TokenType::PipeMark || pos->type == TokenType::Semicolon)
throw Exception(ErrorCodes::SYNTAX_ERROR, "Invalid negative operator");
op = "!" + String(pos->begin, pos->end);
}
else if (token == "matches")
{
++pos;
if (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
if (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (String(pos->begin, pos->end) == "regex")
op += " regex";
@ -360,7 +361,7 @@ bool KQLOperators::convert(std::vector<String> & tokens, IParser::Pos & pos)
}
++pos;
if (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
if (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (String(pos->begin, pos->end) == "~")
op += "~";

View File

@ -20,6 +20,7 @@
#include <Parsers/Kusto/ParserKQLStatement.h>
#include <Parsers/Kusto/ParserKQLSummarize.h>
#include <Parsers/Kusto/ParserKQLTable.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSelectWithUnionQuery.h>
#include <Parsers/ParserTablesInSelectQuery.h>
@ -59,33 +60,34 @@ bool ParserKQLBase::setSubQuerySource(ASTPtr & select_query, ASTPtr & source, bo
if (!select_query || !select_query->as<ASTSelectQuery>()->tables()
|| select_query->as<ASTSelectQuery>()->tables()->as<ASTTablesInSelectQuery>()->children.empty())
return false;
table_expr = select_query->as<ASTSelectQuery>()->tables()->as<ASTTablesInSelectQuery>()->children[0];
table_expr = select_query->as<ASTSelectQuery>()->tables()->as<ASTTablesInSelectQuery>()->children.at(0);
table_expr->as<ASTTablesInSelectQueryElement>()->table_expression
= source->as<ASTSelectQuery>()->tables()->children[0]->as<ASTTablesInSelectQueryElement>()->table_expression;
= source->as<ASTSelectQuery>()->tables()->children.at(0)->as<ASTTablesInSelectQueryElement>()->table_expression;
table_expr->children.at(0) = table_expr->as<ASTTablesInSelectQueryElement>()->table_expression;
return true;
}
if (!select_query || select_query->as<ASTTablesInSelectQuery>()->children.empty()
|| !select_query->as<ASTTablesInSelectQuery>()->children[0]->as<ASTTablesInSelectQueryElement>()->table_expression
|| !select_query->as<ASTTablesInSelectQuery>()->children.at(0)->as<ASTTablesInSelectQueryElement>()->table_expression
|| select_query->as<ASTTablesInSelectQuery>()
->children[0]
->children.at(0)
->as<ASTTablesInSelectQueryElement>()
->table_expression->as<ASTTableExpression>()
->subquery->children.empty()
|| select_query->as<ASTTablesInSelectQuery>()
->children[0]
->children.at(0)
->as<ASTTablesInSelectQueryElement>()
->table_expression->as<ASTTableExpression>()
->subquery->children[0]
->subquery->children.at(0)
->as<ASTSelectWithUnionQuery>()
->list_of_selects->children.empty()
|| select_query->as<ASTTablesInSelectQuery>()
->children[0]
->children.at(0)
->as<ASTTablesInSelectQueryElement>()
->table_expression->as<ASTTableExpression>()
->subquery->children[0]
->subquery->children.at(0)
->as<ASTSelectWithUnionQuery>()
->list_of_selects->children[0]
->list_of_selects->children.at(0)
->as<ASTSelectQuery>()
->tables()
->as<ASTTablesInSelectQuery>()
@ -93,28 +95,29 @@ bool ParserKQLBase::setSubQuerySource(ASTPtr & select_query, ASTPtr & source, bo
return false;
table_expr = select_query->as<ASTTablesInSelectQuery>()
->children[0]
->children.at(0)
->as<ASTTablesInSelectQueryElement>()
->table_expression->as<ASTTableExpression>()
->subquery->children[0]
->subquery->children.at(0)
->as<ASTSelectWithUnionQuery>()
->list_of_selects->children[0]
->list_of_selects->children.at(0)
->as<ASTSelectQuery>()
->tables()
->as<ASTTablesInSelectQuery>()
->children[0];
->children.at(0);
if (!src_is_subquery)
{
table_expr->as<ASTTablesInSelectQueryElement>()->table_expression
= source->as<ASTSelectQuery>()->tables()->children[0]->as<ASTTablesInSelectQueryElement>()->table_expression;
= source->as<ASTSelectQuery>()->tables()->children.at(0)->as<ASTTablesInSelectQueryElement>()->table_expression;
}
else
{
table_expr->as<ASTTablesInSelectQueryElement>()->table_expression
= source->children[0]->as<ASTTablesInSelectQueryElement>()->table_expression;
= source->children.at(0)->as<ASTTablesInSelectQueryElement>()->table_expression;
}
table_expr->children.at(0) = table_expr->as<ASTTablesInSelectQueryElement>()->table_expression;
return true;
}
@ -130,7 +133,7 @@ String ParserKQLBase::getExprFromPipe(Pos & pos)
{
BracketCount bracket_count;
auto end = pos;
while (!end->isEnd() && end->type != TokenType::Semicolon)
while (isValidKQLPos(end) && end->type != TokenType::Semicolon)
{
bracket_count.count(end);
if (end->type == TokenType::PipeMark && bracket_count.isZero())
@ -149,7 +152,7 @@ String ParserKQLBase::getExprFromToken(Pos & pos)
comma_pos.push_back(pos);
size_t paren_count = 0;
while (!pos->isEnd() && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::Semicolon)
{
if (pos->type == TokenType::PipeMark && paren_count == 0)
break;
@ -373,7 +376,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
uint16_t bracket_count = 0;
while (!pos->isEnd() && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::Semicolon)
{
if (pos->type == TokenType::OpeningRoundBracket)
++bracket_count;
@ -383,6 +386,9 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (pos->type == TokenType::PipeMark && bracket_count == 0)
{
++pos;
if (!isValidKQLPos(pos))
return false;
String kql_operator(pos->begin, pos->end);
auto validate_kql_operator = [&]
@ -390,6 +396,9 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (kql_operator == "order" || kql_operator == "sort")
{
++pos;
if (!isValidKQLPos(pos))
return false;
ParserKeyword s_by("by");
if (s_by.ignore(pos, expected))
{
@ -401,6 +410,9 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
auto op_pos_begin = pos;
++pos;
if (!isValidKQLPos(pos))
return false;
ParserToken s_dash(TokenType::Minus);
if (s_dash.ignore(pos, expected))
{
@ -418,6 +430,9 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (!validate_kql_operator())
return false;
++pos;
if (!isValidKQLPos(pos))
return false;
operation_pos.push_back(std::make_pair(kql_operator, pos));
}
else
@ -627,6 +642,7 @@ bool ParserSimpleCHSubquery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
ASTPtr node_table_in_select_query_element = std::make_shared<ASTTablesInSelectQueryElement>();
node_table_in_select_query_element->as<ASTTablesInSelectQueryElement>()->table_expression = node_table_expr;
node_table_in_select_query_element->children.emplace_back(node_table_expr);
ASTPtr res = std::make_shared<ASTTablesInSelectQuery>();
res->children.emplace_back(node_table_in_select_query_element);

View File

@ -4,6 +4,7 @@
#include <Parsers/IParserBase.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLSort.h>
#include <Parsers/Kusto/Utilities.h>
namespace DB
{
@ -24,7 +25,7 @@ bool ParserKQLSort::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
if (!order_list.parse(pos_backup, order_expression_list, expected))
return false;
while (!new_pos->isEnd() && new_pos->type != TokenType::PipeMark && new_pos->type != TokenType::Semicolon)
while (isValidKQLPos(new_pos) && new_pos->type != TokenType::PipeMark && new_pos->type != TokenType::Semicolon)
{
String tmp(new_pos->begin, new_pos->end);
if (tmp == "desc" || tmp == "asc")

View File

@ -5,6 +5,7 @@
#include <Parsers/Kusto/KustoFunctions/KQLFunctionFactory.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLStatement.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSetQuery.h>
namespace DB
@ -79,7 +80,7 @@ bool ParserKQLTableFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
{
++paren_count;
auto pos_start = pos;
while (!pos->isEnd())
while (isValidKQLPos(pos))
{
if (pos->type == TokenType::ClosingRoundBracket)
--paren_count;

View File

@ -15,6 +15,7 @@
#include <Parsers/IParserBase.h>
#include <Parsers/Kusto/ParserKQLQuery.h>
#include <Parsers/Kusto/ParserKQLSummarize.h>
#include <Parsers/Kusto/Utilities.h>
#include <Parsers/ParserSampleRatio.h>
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ParserSetQuery.h>
@ -120,7 +121,7 @@ bool ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
if (String(equal_pos->begin, equal_pos->end) != "=")
{
String groupby_fun = String(begin_pos->begin, begin_pos->end);
if (equal_pos->isEnd() || equal_pos->type == TokenType::Comma || equal_pos->type == TokenType::Semicolon
if (!equal_pos.isValid() || equal_pos->type == TokenType::Comma || equal_pos->type == TokenType::Semicolon
|| equal_pos->type == TokenType::PipeMark)
{
expr = groupby_fun;
@ -151,7 +152,7 @@ bool ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
}
};
while (!pos->isEnd() && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
while (isValidKQLPos(pos) && pos->type != TokenType::PipeMark && pos->type != TokenType::Semicolon)
{
if (pos->type == TokenType::OpeningRoundBracket)
++bracket_count;

View File

@ -71,4 +71,12 @@ ASTPtr wrapInSelectWithUnion(const ASTPtr & select_query)
return select_with_union_query;
}
bool isValidKQLPos(IParser::Pos & pos)
{
return (pos.isValid() ||
pos->type == TokenType::ErrorSingleExclamationMark || // allow kql negative operators
pos->type == TokenType::ErrorWrongNumber || // allow kql timespan data type with decimal like 2.6h
std::string_view(pos->begin, pos->end) == "~"); // allow kql Case-Sensitive operators
}
}

View File

@ -10,4 +10,5 @@ String extractTokenWithoutQuotes(IParser::Pos & pos);
void setSelectAll(ASTSelectQuery & select_query);
String wildcardToRegex(const String & wildcard);
ASTPtr wrapInSelectWithUnion(const ASTPtr & select_query);
bool isValidKQLPos(IParser::Pos & pos);
}

View File

@ -40,7 +40,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_modify_setting("MODIFY SETTING");
ParserKeyword s_reset_setting("RESET SETTING");
ParserKeyword s_modify_query("MODIFY QUERY");
ParserKeyword s_modify_sql_security("MODIFY SQL SECURITY");
ParserKeyword s_modify_refresh("MODIFY REFRESH");
ParserKeyword s_add_index("ADD INDEX");
@ -143,7 +142,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
/* allow_empty = */ false);
ParserNameList values_p;
ParserSelectWithUnionQuery select_p;
ParserSQLSecurity sql_security_p;
ParserRefreshStrategy refresh_p;
ParserTTLExpressionList parser_ttl_list;
@ -168,7 +166,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ASTPtr command_select;
ASTPtr command_values;
ASTPtr command_rename_to;
ASTPtr command_sql_security;
if (with_round_bracket)
{
@ -877,14 +874,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
return false;
command->type = ASTAlterCommand::MODIFY_QUERY;
}
else if (s_modify_sql_security.ignore(pos, expected))
{
/// This is a hack so we can reuse parser from create and don't have to write `MODIFY SQL SECURITY SQL SECURITY INVOKER`
pos -= 2;
if (!sql_security_p.parse(pos, command_sql_security, expected))
return false;
command->type = ASTAlterCommand::MODIFY_SQL_SECURITY;
}
else if (s_modify_refresh.ignore(pos, expected))
{
if (!refresh_p.parse(pos, command->refresh, expected))
@ -959,8 +948,6 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->select = command->children.emplace_back(std::move(command_select)).get();
if (command_values)
command->values = command->children.emplace_back(std::move(command_values)).get();
if (command_sql_security)
command->sql_security = command->children.emplace_back(std::move(command_sql_security)).get();
if (command_rename_to)
command->rename_to = command->children.emplace_back(std::move(command_rename_to)).get();

View File

@ -1,5 +1,4 @@
#include <IO/ReadHelpers.h>
#include <Parsers/Access/ParserUserNameWithHost.h>
#include <Parsers/ASTConstraintDeclaration.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTExpressionList.h>
@ -85,65 +84,6 @@ bool ParserNestedTable::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
return true;
}
bool ParserSQLSecurity::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ParserToken s_eq(TokenType::Equals);
ParserKeyword s_definer("DEFINER");
bool is_definer_current_user = false;
ASTPtr definer;
std::optional<SQLSecurityType> type;
while (true)
{
if (!definer && s_definer.ignore(pos, expected))
{
s_eq.ignore(pos, expected);
if (ParserKeyword{"CURRENT_USER"}.ignore(pos, expected))
is_definer_current_user = true;
else if (!ParserUserNameWithHost{}.parse(pos, definer, expected))
return false;
continue;
}
if (!type && ParserKeyword{"SQL SECURITY"}.ignore(pos, expected))
{
if (s_definer.ignore(pos, expected))
type = SQLSecurityType::DEFINER;
else if (ParserKeyword{"INVOKER"}.ignore(pos, expected))
type = SQLSecurityType::INVOKER;
else if (ParserKeyword{"NONE"}.ignore(pos, expected))
type = SQLSecurityType::NONE;
else
return false;
continue;
}
break;
}
if (!type)
{
if (is_definer_current_user || definer)
type = SQLSecurityType::DEFINER;
else
return false;
}
else if (type == SQLSecurityType::DEFINER && !definer)
is_definer_current_user = true;
auto result = std::make_shared<ASTSQLSecurity>();
result->is_definer_current_user = is_definer_current_user;
result->type = type;
if (definer)
result->definer = typeid_cast<std::shared_ptr<ASTUserNameWithHost>>(definer);
node = std::move(result);
return true;
}
bool ParserIdentifierWithParameters::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
@ -909,7 +849,6 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ParserStorage storage_inner{ParserStorage::TABLE_ENGINE};
ParserTablePropertiesDeclarationList table_properties_p;
ParserSelectWithUnionQuery select_p;
ParserSQLSecurity sql_security_p;
ASTPtr table;
ASTPtr to_table;
@ -918,7 +857,6 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
ASTPtr as_table;
ASTPtr select;
ASTPtr live_view_periodic_refresh;
ASTPtr sql_security;
String cluster_str;
bool attach = false;
@ -935,8 +873,6 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
return false;
}
sql_security_p.parse(pos, sql_security, expected);
if (!s_live.ignore(pos, expected))
return false;
@ -989,9 +925,6 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
return false;
}
if (!sql_security && !sql_security_p.parse(pos, sql_security, expected))
sql_security = std::make_shared<ASTSQLSecurity>();
/// AS SELECT ...
if (!s_as.ignore(pos, expected))
return false;
@ -1034,9 +967,6 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e
if (comment)
query->set(query->comment, comment);
if (sql_security)
query->sql_security = typeid_cast<std::shared_ptr<ASTSQLSecurity>>(sql_security);
return true;
}
@ -1454,7 +1384,6 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ParserTablePropertiesDeclarationList table_properties_p;
ParserSelectWithUnionQuery select_p;
ParserNameList names_p;
ParserSQLSecurity sql_security_p;
ASTPtr table;
ASTPtr to_table;
@ -1464,7 +1393,6 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
ASTPtr as_database;
ASTPtr as_table;
ASTPtr select;
ASTPtr sql_security;
ASTPtr refresh_strategy;
String cluster_str;
@ -1490,8 +1418,6 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
replace_view = true;
}
sql_security_p.parse(pos, sql_security, expected);
if (!replace_view && s_materialized.ignore(pos, expected))
{
is_materialized_view = true;
@ -1584,9 +1510,6 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
}
}
if (!sql_security && !sql_security_p.parse(pos, sql_security, expected))
sql_security = std::make_shared<ASTSQLSecurity>();
/// AS SELECT ...
if (!s_as.ignore(pos, expected))
return false;
@ -1629,7 +1552,6 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
query->set(query->refresh_strategy, refresh_strategy);
if (comment)
query->set(query->comment, comment);
query->sql_security = typeid_cast<std::shared_ptr<ASTSQLSecurity>>(sql_security);
tryGetIdentifierNameInto(as_database, query->as_database);
tryGetIdentifierNameInto(as_table, query->as_table);

View File

@ -25,14 +25,6 @@ protected:
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** Parses sql security option. DEFINER = user_name SQL SECURITY DEFINER
*/
class ParserSQLSecurity : public IParserBase
{
protected:
const char * getName() const override { return "sql security"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** Storage engine or Codec. For example:
* Memory()

View File

@ -1,23 +1,21 @@
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ASTAsterisk.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTInterpolateElement.h>
#include <memory>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTOrderByElement.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/IParserBase.h>
#include <Parsers/CommonParsers.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/IParserBase.h>
#include <Parsers/ParserSampleRatio.h>
#include <Parsers/ParserSetQuery.h>
#include <Parsers/ParserSampleRatio.h>
#include <Parsers/ParserSelectQuery.h>
#include <Parsers/ParserTablesInSelectQuery.h>
#include <Parsers/ParserWithElement.h>
#include <Parsers/ASTOrderByElement.h>
#include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTInterpolateElement.h>
#include <Parsers/ASTIdentifier.h>
#include <Poco/String.h>
#include <memory>
namespace DB
{
@ -292,9 +290,9 @@ bool ParserSelectQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
}
else if (order_expression_list->children.size() == 1)
{
/// ORDER BY *
auto * asterisk = order_expression_list->children[0]->as<ASTOrderByElement>()->children[0]->as<ASTAsterisk>();
if (asterisk != nullptr)
/// ORDER BY ALL
auto * identifier = order_expression_list->children[0]->as<ASTOrderByElement>()->children[0]->as<ASTIdentifier>();
if (identifier != nullptr && Poco::toUpper(identifier->name()) == "ALL")
select_query->order_by_all = true;
}
}

View File

@ -62,18 +62,6 @@ public:
return *this;
}
ALWAYS_INLINE TokenIterator & operator-=(int value)
{
index -= value;
return *this;
}
ALWAYS_INLINE TokenIterator & operator+=(int value)
{
index += value;
return *this;
}
ALWAYS_INLINE bool operator<(const TokenIterator & rhs) const { return index < rhs.index; }
ALWAYS_INLINE bool operator<=(const TokenIterator & rhs) const { return index <= rhs.index; }
ALWAYS_INLINE bool operator==(const TokenIterator & rhs) const { return index == rhs.index; }

View File

@ -188,244 +188,6 @@ private:
std::exception_ptr any_exception;
};
/// Generates one chain part for every view in buildPushingToViewsChain
std::optional<Chain> generateViewChain(
ContextPtr context,
const StorageID & view_id,
ThreadGroupPtr running_group,
Chain & result_chain,
ViewsDataPtr views_data,
ThreadStatusesHolderPtr thread_status_holder,
bool async_insert,
const Block & storage_header,
bool disable_deduplication_for_children)
{
auto view = DatabaseCatalog::instance().tryGetTable(view_id, context);
if (view == nullptr)
{
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
return std::nullopt;
}
auto view_metadata_snapshot = view->getInMemoryMetadataPtr();
auto select_context = view_metadata_snapshot->getSQLSecurityOverriddenContext(context);
select_context->setQueryAccessInfo(context->getQueryAccessInfoPtr());
auto insert_context = Context::createCopy(select_context);
const auto & insert_settings = insert_context->getSettingsRef();
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
{
insert_context->setSetting("insert_deduplicate", Field{false});
}
else if (insert_settings.update_insert_deduplication_token_in_dependent_materialized_views &&
!insert_settings.insert_deduplication_token.value.empty())
{
/** Update deduplication token passed to dependent MV with current view id. So it is possible to properly handle
* deduplication in complex INSERT flows.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1 ---> mv_2_1 ---> ds_2_1 ---> mv_3_1 ---> ds_3_1
* | |
* --> mv_1_2 ---> ds_1_2 ---> mv_2_2 --
*
* Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will
* be inserted into `ds_2_1`.
*
* We are forced to use view id instead of table id because there are some possible INSERT flows where no tables
* are involved.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1
* | |
* --> mv_1_2 --
*
*/
auto insert_deduplication_token = insert_settings.insert_deduplication_token.value;
if (view_id.hasUUID())
insert_deduplication_token += "_" + toString(view_id.uuid);
else
insert_deduplication_token += "_" + view_id.getFullNameNotQuoted();
insert_context->setSetting("insert_deduplication_token", insert_deduplication_token);
}
// Processing of blocks for MVs is done block by block, and there will
// be no parallel reading after (plus it is not a costless operation)
select_context->setSetting("parallelize_output_from_storages", Field{false});
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)
insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value);
if (insert_settings.min_insert_block_size_bytes_for_materialized_views)
insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value);
ASTPtr query;
Chain out;
/// We are creating a ThreadStatus per view to store its metrics individually
/// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls
/// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work,
/// and switch back to the original thread_status.
auto * original_thread = current_thread;
SCOPE_EXIT({ current_thread = original_thread; });
current_thread = nullptr;
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>(/*check_current_thread_on_destruction=*/ false);
/// Copy of a ThreadStatus should be internal.
view_thread_status_ptr->setInternalThread();
view_thread_status_ptr->attachToGroup(running_group);
auto * view_thread_status = view_thread_status_ptr.get();
views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr));
auto runtime_stats = std::make_unique<QueryViewsLogElement::ViewRuntimeStats>();
runtime_stats->target_name = view_id.getFullTableName();
runtime_stats->thread_status = view_thread_status;
runtime_stats->event_time = std::chrono::system_clock::now();
runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START;
auto & type = runtime_stats->type;
auto & target_name = runtime_stats->target_name;
auto * view_counter_ms = &runtime_stats->elapsed_ms;
if (auto * materialized_view = dynamic_cast<StorageMaterializedView *>(view.get()))
{
auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
if (lock == nullptr)
{
// In case the materialized view is dropped/detached at this point, we register a warning and ignore it
assert(materialized_view->is_dropped || materialized_view->is_detached);
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
return std::nullopt;
}
type = QueryViewsLogElement::ViewType::MATERIALIZED;
result_chain.addTableLock(lock);
StoragePtr inner_table = materialized_view->tryGetTargetTable();
/// If target table was dropped, ignore this materialized view.
if (!inner_table)
{
if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table)
return std::nullopt;
throw Exception(
ErrorCodes::UNKNOWN_TABLE,
"Target table '{}' of view '{}' doesn't exists. To ignore this view use setting "
"ignore_materialized_views_with_dropped_target_table",
materialized_view->getTargetTableId().getFullTableName(),
view_id.getFullTableName());
}
auto inner_table_id = inner_table->getStorageID();
auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr();
const auto & select_query = view_metadata_snapshot->getSelectQuery();
if (select_query.select_table_id != views_data->source_storage_id)
{
/// It may happen if materialize view query was changed and it doesn't depend on this source table anymore.
/// See setting `allow_experimental_alter_materialized_view_structure`
LOG_DEBUG(
getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'",
select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), views_data->source_storage_id);
return std::nullopt;
}
query = select_query.inner_query;
target_name = inner_table_id.getFullTableName();
Block header;
/// Get list of columns we get from select query.
if (select_context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context);
else
header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock();
/// Insert only columns returned by select.
Names insert_columns;
const auto & inner_table_columns = inner_metadata_snapshot->getColumns();
for (const auto & column : header)
{
/// But skip columns which storage doesn't have.
if (inner_table_columns.hasNotAlias(column.name))
insert_columns.emplace_back(column.name);
}
InterpreterInsertQuery interpreter(nullptr, insert_context, false, false, false);
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms, !materialized_view->hasInnerTable());
if (interpreter.shouldAddSquashingFroStorage(inner_table))
{
bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
const auto & settings = insert_context->getSettingsRef();
out.addSource(std::make_shared<SquashingChunksTransform>(
out.getInputHeader(),
table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, insert_context->getQuota());
counting->setProcessListElement(insert_context->getProcessListElement());
counting->setProgressCallback(insert_context->getProgressCallback());
out.addSource(std::move(counting));
out.addStorageHolder(view);
out.addStorageHolder(inner_table);
}
else if (auto * live_view = dynamic_cast<StorageLiveView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::LIVE;
query = live_view->getInnerQuery();
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert, storage_header);
}
else if (auto * window_view = dynamic_cast<StorageWindowView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW;
query = window_view->getMergeableQuery();
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert);
}
else
out = buildPushingToViewsChain(
view, view_metadata_snapshot, insert_context, ASTPtr(),
/* no_destination= */ false,
thread_status_holder, running_group, view_counter_ms, async_insert);
views_data->views.emplace_back(ViewRuntimeData{
std::move(query),
out.getInputHeader(),
view_id,
nullptr,
std::move(runtime_stats)});
if (type == QueryViewsLogElement::ViewType::MATERIALIZED)
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform>(
storage_header, views_data->views.back(), views_data);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
}
return out;
}
Chain buildPushingToViewsChain(
const StoragePtr & storage,
@ -470,45 +232,259 @@ Chain buildPushingToViewsChain(
auto table_id = storage->getStorageID();
auto views = DatabaseCatalog::instance().getDependentViews(table_id);
/// We need special context for materialized views insertions
ContextMutablePtr select_context;
ContextMutablePtr insert_context;
ViewsDataPtr views_data;
if (!views.empty())
{
auto process_context = Context::createCopy(context); /// This context will be used in `process` function
views_data = std::make_shared<ViewsData>(thread_status_holder, process_context, table_id, metadata_snapshot, storage);
select_context = Context::createCopy(context);
insert_context = Context::createCopy(context);
const auto & insert_settings = insert_context->getSettingsRef();
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
{
insert_context->setSetting("insert_deduplicate", Field{false});
}
// Processing of blocks for MVs is done block by block, and there will
// be no parallel reading after (plus it is not a costless operation)
select_context->setSetting("parallelize_output_from_storages", Field{false});
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)
insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value);
if (insert_settings.min_insert_block_size_bytes_for_materialized_views)
insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value);
views_data = std::make_shared<ViewsData>(thread_status_holder, select_context, table_id, metadata_snapshot, storage);
}
std::vector<Chain> chains;
for (const auto & view_id : views)
{
try
auto view = DatabaseCatalog::instance().tryGetTable(view_id, context);
if (view == nullptr)
{
auto out = generateViewChain(
context, view_id, running_group, result_chain,
views_data, thread_status_holder, async_insert, storage_header, disable_deduplication_for_children);
if (!out.has_value())
continue;
chains.emplace_back(std::move(*out));
/// Add the view to the query access info so it can appear in system.query_log
/// hasQueryContext - for materialized tables with background replication process query context is not added
if (!no_destination && context->hasQueryContext())
{
context->getQueryContext()->addQueryAccessInfo(
backQuoteIfNeed(view_id.getDatabaseName()),
views_data->views.back().runtime_stats->target_name,
/*column_names=*/ {});
context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName());
}
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
continue;
}
catch (const Exception & e)
auto view_metadata_snapshot = view->getInMemoryMetadataPtr();
ASTPtr query;
Chain out;
/// We are creating a ThreadStatus per view to store its metrics individually
/// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls
/// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work,
/// and switch back to the original thread_status.
auto * original_thread = current_thread;
SCOPE_EXIT({ current_thread = original_thread; });
current_thread = nullptr;
std::unique_ptr<ThreadStatus> view_thread_status_ptr = std::make_unique<ThreadStatus>(/*check_current_thread_on_destruction=*/ false);
/// Copy of a ThreadStatus should be internal.
view_thread_status_ptr->setInternalThread();
view_thread_status_ptr->attachToGroup(running_group);
auto * view_thread_status = view_thread_status_ptr.get();
views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr));
auto runtime_stats = std::make_unique<QueryViewsLogElement::ViewRuntimeStats>();
runtime_stats->target_name = view_id.getFullTableName();
runtime_stats->thread_status = view_thread_status;
runtime_stats->event_time = std::chrono::system_clock::now();
runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START;
auto & type = runtime_stats->type;
auto & target_name = runtime_stats->target_name;
auto * view_counter_ms = &runtime_stats->elapsed_ms;
const auto & insert_settings = insert_context->getSettingsRef();
ContextMutablePtr view_insert_context = insert_context;
if (!disable_deduplication_for_children &&
insert_settings.update_insert_deduplication_token_in_dependent_materialized_views &&
!insert_settings.insert_deduplication_token.value.empty())
{
LOG_ERROR(&Poco::Logger::get("PushingToViews"), "Failed to push block to view {}, {}", view_id, e.message());
if (!context->getSettingsRef().materialized_views_ignore_errors)
throw;
/** Update deduplication token passed to dependent MV with current view id. So it is possible to properly handle
* deduplication in complex INSERT flows.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1 ---> mv_2_1 ---> ds_2_1 ---> mv_3_1 ---> ds_3_1
* | |
* --> mv_1_2 ---> ds_1_2 ---> mv_2_2 --
*
* Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will
* be inserted into `ds_2_1`.
*
* We are forced to use view id instead of table id because there are some possible INSERT flows where no tables
* are involved.
*
* Example:
*
* landing ---> mv_1_1 ---> ds_1_1
* | |
* --> mv_1_2 --
*
*/
auto insert_deduplication_token = insert_settings.insert_deduplication_token.value;
if (view_id.hasUUID())
insert_deduplication_token += "_" + toString(view_id.uuid);
else
insert_deduplication_token += "_" + view_id.getFullNameNotQuoted();
view_insert_context = Context::createCopy(insert_context);
view_insert_context->setSetting("insert_deduplication_token", insert_deduplication_token);
}
if (auto * materialized_view = dynamic_cast<StorageMaterializedView *>(view.get()))
{
auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);
if (lock == nullptr)
{
// In case the materialized view is dropped/detached at this point, we register a warning and ignore it
assert(materialized_view->is_dropped || materialized_view->is_detached);
LOG_WARNING(
getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName());
continue;
}
type = QueryViewsLogElement::ViewType::MATERIALIZED;
result_chain.addTableLock(lock);
StoragePtr inner_table = materialized_view->tryGetTargetTable();
/// If target table was dropped, ignore this materialized view.
if (!inner_table)
{
if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table)
continue;
throw Exception(
ErrorCodes::UNKNOWN_TABLE,
"Target table '{}' of view '{}' doesn't exists. To ignore this view use setting "
"ignore_materialized_views_with_dropped_target_table",
materialized_view->getTargetTableId().getFullTableName(),
view_id.getFullTableName());
}
auto inner_table_id = inner_table->getStorageID();
auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr();
const auto & select_query = view_metadata_snapshot->getSelectQuery();
if (select_query.select_table_id != table_id)
{
/// It may happen if materialize view query was changed and it doesn't depend on this source table anymore.
/// See setting `allow_experimental_alter_materialized_view_structure`
LOG_DEBUG(
getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'",
select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), table_id);
continue;
}
query = select_query.inner_query;
target_name = inner_table_id.getFullTableName();
Block header;
/// Get list of columns we get from select query.
if (select_context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context);
else
header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock();
/// Insert only columns returned by select.
Names insert_columns;
const auto & inner_table_columns = inner_metadata_snapshot->getColumns();
for (const auto & column : header)
{
/// But skip columns which storage doesn't have.
if (inner_table_columns.hasNotAlias(column.name))
insert_columns.emplace_back(column.name);
}
InterpreterInsertQuery interpreter(nullptr, view_insert_context, false, false, false);
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms);
if (interpreter.shouldAddSquashingFroStorage(inner_table))
{
bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
const auto & settings = view_insert_context->getSettingsRef();
out.addSource(std::make_shared<SquashingChunksTransform>(
out.getInputHeader(),
table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, view_insert_context->getQuota());
counting->setProcessListElement(view_insert_context->getProcessListElement());
counting->setProgressCallback(view_insert_context->getProgressCallback());
out.addSource(std::move(counting));
out.addStorageHolder(view);
out.addStorageHolder(inner_table);
}
else if (auto * live_view = dynamic_cast<StorageLiveView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::LIVE;
query = live_view->getInnerQuery(); // Used only to log in system.query_views_log
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert, storage_header);
}
else if (auto * window_view = dynamic_cast<StorageWindowView *>(view.get()))
{
runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW;
query = window_view->getMergeableQuery(); // Used only to log in system.query_views_log
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ true,
thread_status_holder, running_group, view_counter_ms, async_insert);
}
else
out = buildPushingToViewsChain(
view, view_metadata_snapshot, view_insert_context, ASTPtr(),
/* no_destination= */ false,
thread_status_holder, running_group, view_counter_ms, async_insert);
views_data->views.emplace_back(ViewRuntimeData{
std::move(query),
out.getInputHeader(),
view_id,
nullptr,
std::move(runtime_stats)});
if (type == QueryViewsLogElement::ViewType::MATERIALIZED)
{
auto executing_inner_query = std::make_shared<ExecutingInnerQueryFromViewTransform>(
storage_header, views_data->views.back(), views_data);
executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms);
out.addSource(std::move(executing_inner_query));
}
chains.emplace_back(std::move(out));
/// Add the view to the query access info so it can appear in system.query_log
/// hasQueryContext - for materialized tables with background replication process query context is not added
if (!no_destination && context->hasQueryContext())
{
context->getQueryContext()->addQueryAccessInfo(
backQuoteIfNeed(view_id.getDatabaseName()),
views_data->views.back().runtime_stats->target_name,
/*column_names=*/ {});
context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName());
}
}
@ -605,12 +581,12 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat
if (local_context->getSettingsRef().allow_experimental_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck());
InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions());
pipeline = interpreter.buildQueryPipeline();
}
else
{
InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions().ignoreAccessCheck());
InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions());
pipeline = interpreter.buildQueryPipeline();
}

View File

@ -1,4 +1,7 @@
#pragma once
#include <mutex>
#include <Core/Types.h>
namespace DB

View File

@ -442,14 +442,6 @@ std::optional<AlterCommand> AlterCommand::parse(const ASTAlterCommand * command_
command.if_exists = command_ast->if_exists;
return command;
}
else if (command_ast->type == ASTAlterCommand::MODIFY_SQL_SECURITY)
{
AlterCommand command;
command.ast = command_ast->clone();
command.type = AlterCommand::MODIFY_SQL_SECURITY;
command.sql_security = command_ast->sql_security->clone();
return command;
}
else
return {};
}
@ -862,8 +854,6 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context)
for (auto & index : metadata.secondary_indices)
rename_visitor.visit(index.definition_ast);
}
else if (type == MODIFY_SQL_SECURITY)
metadata.setDefiner(sql_security->as<ASTSQLSecurity &>());
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong parameter type in ALTER query");
}

View File

@ -50,7 +50,6 @@ struct AlterCommand
MODIFY_DATABASE_SETTING,
COMMENT_TABLE,
REMOVE_SAMPLE_BY,
MODIFY_SQL_SECURITY,
};
/// Which property user wants to remove from column
@ -148,9 +147,6 @@ struct AlterCommand
/// For MODIFY_QUERY
ASTPtr select = nullptr;
/// For MODIFY_SQL_SECURITY
ASTPtr sql_security = nullptr;
/// For MODIFY_REFRESH
ASTPtr refresh = nullptr;

View File

@ -35,9 +35,9 @@
#include <Storages/extractKeyExpressionList.h>
#include <Storages/PartitionCommands.h>
#include <Interpreters/PartLog.h>
#include <Poco/Timestamp.h>
#include <Common/threadPoolCallbackRunner.h>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/global_fun.hpp>
@ -1353,6 +1353,93 @@ protected:
const MergeListEntry * merge_entry,
std::shared_ptr<ProfileEvents::Counters::Snapshot> profile_counters);
/// Tracks parts whose mutations recently failed and enforces an exponential
/// backoff before the same part may be mutated again. All public methods are
/// thread-safe (guarded by parts_info_lock).
class PartMutationBackoffPolicy
{
    struct PartMutationInfo
    {
        size_t retry_count;
        size_t latest_fail_time_us;
        size_t max_postpone_time_ms;
        /// Cap on retry_count so the backoff interval never exceeds
        /// max_postpone_time_ms: 2^max_postpone_power ms <= max_postpone_time_ms.
        size_t max_postpone_power;

        explicit PartMutationInfo(size_t max_postpone_time_ms_)
            : retry_count(0ull)
            , latest_fail_time_us(static_cast<size_t>(Poco::Timestamp().epochMicroseconds()))
            , max_postpone_time_ms(max_postpone_time_ms_)
            , max_postpone_power((max_postpone_time_ms_) ? (static_cast<size_t>(std::log2(max_postpone_time_ms_))) : (0ull))
        {}

        /// Earliest time (us since epoch) at which the part may be mutated again.
        /// With backoff disabled (max_postpone_time_ms == 0) returns "now".
        size_t getNextMinExecutionTimeUsResolution() const
        {
            if (max_postpone_time_ms == 0)
                return static_cast<size_t>(Poco::Timestamp().epochMicroseconds());
            /// 1ull: shifting a plain int would overflow for retry_count >= 31.
            size_t current_backoff_interval_us = (1ull << retry_count) * 1000ul;
            return latest_fail_time_us + current_backoff_interval_us;
        }

        /// Register one more failure: double the backoff (up to the cap)
        /// and restart the waiting interval from now.
        void addPartFailure()
        {
            if (max_postpone_time_ms == 0)
                return;
            retry_count = std::min(max_postpone_power, retry_count + 1);
            latest_fail_time_us = static_cast<size_t>(Poco::Timestamp().epochMicroseconds());
        }

        bool partCanBeMutated() const
        {
            if (max_postpone_time_ms == 0)
                return true;

            auto current_time_us = static_cast<size_t>(Poco::Timestamp().epochMicroseconds());
            return current_time_us >= getNextMinExecutionTimeUsResolution();
        }
    };

    using DataPartsWithRetryInfo = std::unordered_map<String, PartMutationInfo>;
    DataPartsWithRetryInfo failed_mutation_parts;
    mutable std::mutex parts_info_lock;

public:
    /// Forget all recorded failures.
    void resetMutationFailures()
    {
        std::unique_lock _lock(parts_info_lock);
        failed_mutation_parts.clear();
    }

    /// Forget failures for a single part (e.g. after it mutated successfully).
    void removePartFromFailed(const String & part_name)
    {
        std::unique_lock _lock(parts_info_lock);
        failed_mutation_parts.erase(part_name);
    }

    /// Record a mutation failure for the part, extending its backoff interval.
    /// max_postpone_time_ms_ is only used when the part is seen for the first
    /// time; an existing entry keeps its original limit (same as before).
    void addPartMutationFailure(const String & part_name, size_t max_postpone_time_ms_)
    {
        std::unique_lock _lock(parts_info_lock);
        /// try_emplace: insert a fresh PartMutationInfo or reuse the existing one.
        auto it = failed_mutation_parts.try_emplace(part_name, max_postpone_time_ms_).first;
        it->second.addPartFailure();
    }

    /// True if the part has no pending backoff and may be mutated now.
    bool partCanBeMutated(const String & part_name)
    {
        std::unique_lock _lock(parts_info_lock);
        auto iter = failed_mutation_parts.find(part_name);
        if (iter == failed_mutation_parts.end())
            return true;

        return iter->second.partCanBeMutated();
    }
};
/// Controls postponing logic for failed mutations.
PartMutationBackoffPolicy mutation_backoff_policy;
/// If part is assigned to merge or mutation (possibly replicated)
/// Should be overridden by children, because they can have different
/// mechanisms for parts locking

View File

@ -146,6 +146,7 @@ struct Settings;
M(UInt64, vertical_merge_algorithm_min_rows_to_activate, 16 * 8192, "Minimal (approximate) sum of rows in merging parts to activate Vertical merge algorithm.", 0) \
M(UInt64, vertical_merge_algorithm_min_bytes_to_activate, 0, "Minimal (approximate) uncompressed size in bytes in merging parts to activate Vertical merge algorithm.", 0) \
M(UInt64, vertical_merge_algorithm_min_columns_to_activate, 11, "Minimal amount of non-PK columns to activate Vertical merge algorithm.", 0) \
M(UInt64, max_postpone_time_for_failed_mutations_ms, 5ULL * 60 * 1000, "The maximum postpone time for failed mutations.", 0) \
\
/** Compatibility settings */ \
M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \

View File

@ -1,6 +1,7 @@
#include <Storages/MergeTree/ReplicatedMergeMutateTaskBase.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/ReplicatedMergeTreeQueue.h>
#include <Common/ProfileEventsScope.h>
@ -110,11 +111,14 @@ bool ReplicatedMergeMutateTaskBase::executeStep()
auto mutations_end_it = in_partition->second.upper_bound(result_data_version);
for (auto it = mutations_begin_it; it != mutations_end_it; ++it)
{
auto & src_part = log_entry->source_parts.at(0);
ReplicatedMergeTreeQueue::MutationStatus & status = *it->second;
status.latest_failed_part = log_entry->source_parts.at(0);
status.latest_failed_part = src_part;
status.latest_failed_part_info = source_part_info;
status.latest_fail_time = time(nullptr);
status.latest_fail_reason = getExceptionMessage(saved_exception, false);
if (result_data_version == it->first)
storage.mutation_backoff_policy.addPartMutationFailure(src_part, storage.getSettings()->max_postpone_time_for_failed_mutations_ms);
}
}
}
@ -142,6 +146,12 @@ bool ReplicatedMergeMutateTaskBase::executeImpl()
{
storage.queue.removeProcessedEntry(storage.getZooKeeper(), selected_entry->log_entry);
state = State::SUCCESS;
auto & log_entry = selected_entry->log_entry;
if (log_entry->type == ReplicatedMergeTreeLogEntryData::MUTATE_PART)
{
storage.mutation_backoff_policy.removePartFromFailed(log_entry->source_parts.at(0));
}
}
catch (...)
{

View File

@ -13,6 +13,7 @@
#include <base/sort.h>
#include <ranges>
#include <Poco/Timestamp.h>
namespace DB
{
@ -1353,9 +1354,17 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
sum_parts_size_in_bytes += part_in_memory->block.bytes();
else
sum_parts_size_in_bytes += part->getBytesOnDisk();
if (entry.type == LogEntry::MUTATE_PART && !storage.mutation_backoff_policy.partCanBeMutated(part->name))
{
constexpr auto fmt_string = "Not executing log entry {} of type {} for part {} "
"because recently it has failed. According to exponential backoff policy, put aside this log entry.";
LOG_DEBUG(LogToStr(out_postpone_reason, log), fmt_string, entry.znode_name, entry.typeToString(), entry.new_part_name);
return false;
}
}
}
if (merger_mutator.merges_blocker.isCancelled())
{
constexpr auto fmt_string = "Not executing log entry {} of type {} for part {} because merges and mutations are cancelled now.";

View File

@ -26,6 +26,8 @@
#include <IO/Archives/createArchiveReader.h>
#include <IO/Archives/IArchiveReader.h>
#include <IO/PeekableReadBuffer.h>
#include <IO/AsynchronousReadBufferFromFile.h>
#include <Disks/IO/IOUringReader.h>
#include <Formats/FormatFactory.h>
#include <Formats/ReadSchemaUtils.h>
@ -92,6 +94,7 @@ namespace ErrorCodes
extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
extern const int CANNOT_DETECT_FORMAT;
extern const int CANNOT_COMPILE_REGEXP;
extern const int UNSUPPORTED_METHOD;
}
namespace
@ -276,6 +279,22 @@ std::unique_ptr<ReadBuffer> selectReadBuffer(
ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
}
else if (read_method == LocalFSReadMethod::io_uring && !use_table_fd)
{
#if USE_LIBURING
auto & reader = context->getIOURingReader();
if (!reader.isSupported())
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "io_uring is not supported by this system");
res = std::make_unique<AsynchronousReadBufferFromFileWithDescriptorsCache>(
reader,
Priority{},
current_path,
context->getSettingsRef().max_read_buffer_size);
#else
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Read method io_uring is only supported in Linux");
#endif
}
else
{
if (use_table_fd)

View File

@ -1,8 +1,5 @@
#include <Storages/StorageInMemoryMetadata.h>
#include <Access/AccessControl.h>
#include <Access/User.h>
#include <Common/HashTable/HashMap.h>
#include <Common/HashTable/HashSet.h>
#include <Common/quoteString.h>
@ -10,7 +7,6 @@
#include <Core/ColumnWithTypeAndName.h>
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeEnum.h>
#include <Interpreters/Context.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/Operators.h>
@ -27,7 +23,6 @@ namespace ErrorCodes
extern const int NOT_FOUND_COLUMN_IN_BLOCK;
extern const int TYPE_MISMATCH;
extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
extern const int LOGICAL_ERROR;
}
StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata & other)
@ -46,8 +41,6 @@ StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata &
, settings_changes(other.settings_changes ? other.settings_changes->clone() : nullptr)
, select(other.select)
, refresh(other.refresh ? other.refresh->clone() : nullptr)
, definer(other.definer)
, sql_security_type(other.sql_security_type)
, comment(other.comment)
, metadata_version(other.metadata_version)
{
@ -78,8 +71,6 @@ StorageInMemoryMetadata & StorageInMemoryMetadata::operator=(const StorageInMemo
settings_changes.reset();
select = other.select;
refresh = other.refresh ? other.refresh->clone() : nullptr;
definer = other.definer;
sql_security_type = other.sql_security_type;
comment = other.comment;
metadata_version = other.metadata_version;
return *this;
@ -90,69 +81,6 @@ void StorageInMemoryMetadata::setComment(const String & comment_)
comment = comment_;
}
/// Remembers the DEFINER user (when one is given) and the SQL SECURITY type
/// taken from the parsed SQL SECURITY clause.
void StorageInMemoryMetadata::setDefiner(const ASTSQLSecurity & sql_security)
{
    const auto & definer_ast = sql_security.definer;
    if (definer_ast)
        definer = definer_ast->toString();

    sql_security_type = sql_security.type;
}
/// Resolves the definer to a user id. When an explicit definer was stored,
/// looks it up in the access control; otherwise falls back to the context's
/// current user. Throws LOGICAL_ERROR when neither is available.
UUID StorageInMemoryMetadata::getDefinerID(DB::ContextPtr context) const
{
    if (definer)
    {
        const auto & access_control = context->getAccessControl();
        return access_control.getID<User>(*definer);
    }

    /// No explicit definer: use the user who is executing the query.
    if (const auto definer_id = context->getUserID())
        return *definer_id;

    throw Exception(ErrorCodes::LOGICAL_ERROR, "No user in context for sub query execution.");
}
/// Returns a copy of the given context adjusted according to the storage's
/// SQL SECURITY setting:
///  - no SQL SECURITY / INVOKER: plain copy of the caller's context;
///  - NONE: fresh query context built from the global context with the
///    caller's setting changes applied as-is;
///  - otherwise (DEFINER): fresh query context switched to the definer's
///    user, with the caller's setting changes clamped to constraints first.
ContextMutablePtr StorageInMemoryMetadata::getSQLSecurityOverriddenContext(ContextPtr context) const
{
    if (!sql_security_type.has_value())
        return Context::createCopy(context);

    if (sql_security_type == SQLSecurityType::INVOKER)
        return Context::createCopy(context);

    /// Start from the global context so the caller's user/grants do not leak in,
    /// then copy over the pieces of caller state the view execution still needs.
    auto new_context = Context::createCopy(context->getGlobalContext());
    new_context->setClientInfo(context->getClientInfo());
    new_context->makeQueryContext();

    const auto & database = context->getCurrentDatabase();
    if (!database.empty())
        new_context->setCurrentDatabase(database);

    new_context->setInsertionTable(context->getInsertionTable(), context->getInsertionTableColumnNames());
    new_context->setProgressCallback(context->getProgressCallback());
    new_context->setProcessListElement(context->getProcessListElement());

    /// Propagate transactional state so reads/writes stay in the caller's txn.
    if (context->getCurrentTransaction())
        new_context->setCurrentTransaction(context->getCurrentTransaction());

    if (context->getZooKeeperMetadataTransaction())
        new_context->initZooKeeperMetadataTransaction(context->getZooKeeperMetadataTransaction());

    if (sql_security_type == SQLSecurityType::NONE)
    {
        /// NONE: no user switch; apply the caller's settings unconstrained.
        new_context->applySettingsChanges(context->getSettingsRef().changes());
        return new_context;
    }

    /// DEFINER: run as the definer user; the caller's setting changes must be
    /// clamped to the definer's constraints before being applied.
    new_context->setUser(getDefinerID(context));

    auto changed_settings = context->getSettingsRef().changes();
    new_context->clampToSettingsConstraints(changed_settings, SettingSource::QUERY);
    new_context->applySettingsChanges(changed_settings);

    return new_context;
}
void StorageInMemoryMetadata::setColumns(ColumnsDescription columns_)
{
if (columns_.getAllPhysical().empty())

View File

@ -1,7 +1,5 @@
#pragma once
#include <Parsers/Access/ASTUserNameWithHost.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/IAST_fwd.h>
#include <Storages/ColumnDependency.h>
#include <Storages/ColumnsDescription.h>
@ -53,14 +51,6 @@ struct StorageInMemoryMetadata
/// Materialized view REFRESH parameters.
ASTPtr refresh;
/// DEFINER <user_name>. Allows to specify a definer of the table.
/// Supported for MaterializedView and View.
std::optional<String> definer;
/// SQL SECURITY <DEFINER | INVOKER | NONE>
/// Supported for MaterializedView and View.
std::optional<SQLSecurityType> sql_security_type;
String comment;
/// Version of metadata. Managed properly by ReplicatedMergeTree only
@ -115,15 +105,6 @@ struct StorageInMemoryMetadata
/// Get copy of current metadata with metadata_version_
StorageInMemoryMetadata withMetadataVersion(int32_t metadata_version_) const;
/// Sets a definer for the storage.
void setDefiner(const ASTSQLSecurity & sql_security);
UUID getDefinerID(ContextPtr context) const;
/// Returns a copy of the context with the correct user from SQL security options.
/// If the SQL security wasn't set, this is equivalent to `Context::createCopy(context)`.
/// The context from this function must be used every time whenever views execute any read/write operations or subqueries.
ContextMutablePtr getSQLSecurityOverriddenContext(ContextPtr context) const;
/// Returns combined set of columns
const ColumnsDescription & getColumns() const;

View File

@ -39,7 +39,6 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int NOT_IMPLEMENTED;
extern const int INCORRECT_QUERY;
extern const int QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW;
extern const int TOO_MANY_MATERIALIZED_VIEWS;
}
@ -78,11 +77,6 @@ StorageMaterializedView::StorageMaterializedView(
{
StorageInMemoryMetadata storage_metadata;
storage_metadata.setColumns(columns_);
if (query.sql_security)
storage_metadata.setDefiner(query.sql_security->as<ASTSQLSecurity &>());
if (storage_metadata.sql_security_type == SQLSecurityType::INVOKER)
throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW");
if (!query.select)
throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName());
@ -181,28 +175,19 @@ void StorageMaterializedView::read(
const size_t max_block_size,
const size_t num_streams)
{
auto context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context);
auto storage = getTargetTable();
auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout);
auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);
auto target_metadata_snapshot = storage->getInMemoryMetadataPtr();
auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, context);
auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, local_context);
if (query_info.order_optimizer)
query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, context);
query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, local_context);
if (!getInMemoryMetadataPtr()->select.select_table_id.empty())
context->checkAccess(AccessType::SELECT, getInMemoryMetadataPtr()->select.select_table_id, column_names);
auto storage_id = storage->getStorageID();
/// We don't need to check access if the inner table was created automatically.
if (!has_inner_table && !storage_id.empty())
context->checkAccess(AccessType::SELECT, storage_id, column_names);
storage->read(query_plan, column_names, target_storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams);
storage->read(query_plan, column_names, target_storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams);
if (query_plan.isInitialized())
{
auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, context, processed_stage);
auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage);
auto target_header = query_plan.getCurrentDataStream().header;
/// No need to convert columns that does not exists in MV
@ -237,20 +222,11 @@ void StorageMaterializedView::read(
SinkToStoragePtr StorageMaterializedView::write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr local_context, bool async_insert)
{
auto context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context);
auto storage = getTargetTable();
auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout);
auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout);
auto metadata_snapshot = storage->getInMemoryMetadataPtr();
auto storage_id = storage->getStorageID();
/// We don't need to check access if the inner table was created automatically.
if (!has_inner_table && !storage_id.empty())
{
auto query_sample_block = InterpreterInsertQuery::getSampleBlock(query->as<ASTInsertQuery &>(), storage, metadata_snapshot, context);
context->checkAccess(AccessType::INSERT, storage_id, query_sample_block.getNames());
}
auto sink = storage->write(query, metadata_snapshot, context, async_insert);
auto sink = storage->write(query, metadata_snapshot, local_context, async_insert);
sink->addTableLock(lock);
return sink;
@ -321,7 +297,7 @@ bool StorageMaterializedView::optimize(
std::tuple<ContextMutablePtr, std::shared_ptr<ASTInsertQuery>> StorageMaterializedView::prepareRefresh() const
{
auto refresh_context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext());
auto refresh_context = Context::createCopy(getContext());
/// Generate a random query id.
refresh_context->setCurrentQueryId("");
@ -402,24 +378,15 @@ void StorageMaterializedView::checkAlterIsPossible(const AlterCommands & command
{
for (const auto & command : commands)
{
if (command.type == AlterCommand::MODIFY_SQL_SECURITY)
{
if (command.sql_security->as<ASTSQLSecurity &>().type == SQLSecurityType::INVOKER)
throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW");
if (command.isCommentAlter())
continue;
}
else if (command.isCommentAlter())
if (command.type == AlterCommand::MODIFY_QUERY)
continue;
else if (command.type == AlterCommand::MODIFY_QUERY)
if (command.type == AlterCommand::MODIFY_REFRESH && refresher)
continue;
else if (command.type == AlterCommand::MODIFY_REFRESH && refresher)
continue;
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}",
command.type, getName());
command.type, getName());
}
}
void StorageMaterializedView::checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const

View File

@ -5,6 +5,7 @@
#include <optional>
#include <ranges>
#include <Poco/Timestamp.h>
#include <base/sort.h>
#include <Backups/BackupEntriesCollector.h>
#include <Databases/IDatabase.h>
@ -538,6 +539,8 @@ void StorageMergeTree::updateMutationEntriesErrors(FutureMergedMutatedPartPtr re
Int64 sources_data_version = result_part->parts.at(0)->info.getDataVersion();
Int64 result_data_version = result_part->part_info.getDataVersion();
auto & failed_part = result_part->parts.at(0);
if (sources_data_version != result_data_version)
{
std::lock_guard lock(currently_processing_in_background_mutex);
@ -555,14 +558,21 @@ void StorageMergeTree::updateMutationEntriesErrors(FutureMergedMutatedPartPtr re
entry.latest_failed_part_info = MergeTreePartInfo();
entry.latest_fail_time = 0;
entry.latest_fail_reason.clear();
if (static_cast<UInt64>(result_part->part_info.mutation) == it->first)
mutation_backoff_policy.removePartFromFailed(failed_part->name);
}
}
else
{
entry.latest_failed_part = result_part->parts.at(0)->name;
entry.latest_failed_part_info = result_part->parts.at(0)->info;
entry.latest_failed_part = failed_part->name;
entry.latest_failed_part_info = failed_part->info;
entry.latest_fail_time = time(nullptr);
entry.latest_fail_reason = exception_message;
if (static_cast<UInt64>(result_part->part_info.mutation) == it->first)
{
mutation_backoff_policy.addPartMutationFailure(failed_part->name, getSettings()->max_postpone_time_for_failed_mutations_ms);
}
}
}
}
@ -833,6 +843,8 @@ CancellationCode StorageMergeTree::killMutation(const String & mutation_id)
}
}
mutation_backoff_policy.resetMutationFailures();
if (!to_kill)
return CancellationCode::NotFound;
@ -1217,6 +1229,12 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMutate(
TransactionID first_mutation_tid = mutations_begin_it->second.tid;
MergeTreeTransactionPtr txn;
if (!mutation_backoff_policy.partCanBeMutated(part->name))
{
LOG_DEBUG(log, "According to exponential backoff policy, do not perform mutations for the part {} yet. Put it aside.", part->name);
continue;
}
if (!first_mutation_tid.isPrehistoric())
{

View File

@ -7460,6 +7460,7 @@ CancellationCode StorageReplicatedMergeTree::killMutation(const String & mutatio
Int64 block_number = pair.second;
getContext()->getMergeList().cancelPartMutations(getStorageID(), partition_id, block_number);
}
mutation_backoff_policy.resetMutationFailures();
return CancellationCode::CancelSent;
}

View File

@ -12,7 +12,6 @@
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Storages/AlterCommands.h>
#include <Storages/StorageView.h>
#include <Storages/StorageFactory.h>
#include <Storages/SelectQueryDescription.h>
@ -36,7 +35,6 @@ namespace ErrorCodes
{
extern const int INCORRECT_QUERY;
extern const int LOGICAL_ERROR;
extern const int NOT_IMPLEMENTED;
}
@ -92,10 +90,10 @@ bool hasJoin(const ASTSelectWithUnionQuery & ast)
/** There are no limits on the maximum size of the result for the view.
* Since the result of the view is not the result of the entire query.
*/
ContextPtr getViewContext(ContextPtr context, const StorageSnapshotPtr & storage_snapshot)
ContextPtr getViewContext(ContextPtr context)
{
auto view_context = storage_snapshot->metadata->getSQLSecurityOverriddenContext(context);
Settings view_settings = view_context->getSettings();
auto view_context = Context::createCopy(context);
Settings view_settings = context->getSettings();
view_settings.max_result_rows = 0;
view_settings.max_result_bytes = 0;
view_settings.extremes = false;
@ -124,8 +122,6 @@ StorageView::StorageView(
storage_metadata.setColumns(columns_);
storage_metadata.setComment(comment);
if (query.sql_security)
storage_metadata.setDefiner(query.sql_security->as<ASTSQLSecurity &>());
if (!query.select)
throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName());
@ -164,13 +160,13 @@ void StorageView::read(
if (context->getSettingsRef().allow_experimental_analyzer)
{
InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options);
InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context), options);
interpreter.addStorageLimits(*query_info.storage_limits);
query_plan = std::move(interpreter).extractQueryPlan();
}
else
{
InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names);
InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context), options, column_names);
interpreter.addStorageLimits(*query_info.storage_limits);
interpreter.buildQueryPlan(query_plan);
}
@ -286,15 +282,6 @@ ASTPtr StorageView::restoreViewName(ASTSelectQuery & select_query, const ASTPtr
return subquery->children[0];
}
void StorageView::checkAlterIsPossible(const AlterCommands & commands, ContextPtr /* local_context */) const
{
for (const auto & command : commands)
{
if (!command.isCommentAlter() && command.type != AlterCommand::MODIFY_SQL_SECURITY)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}", command.type, getName());
}
}
void registerStorageView(StorageFactory & factory)
{
factory.registerStorage("View", [](const StorageFactory::Arguments & args)

View File

@ -26,8 +26,6 @@ public:
bool supportsSampling() const override { return true; }
bool supportsFinal() const override { return true; }
void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override;
void read(
QueryPlan & query_plan,
const Names & column_names,

View File

@ -29,7 +29,6 @@ namespace
VIEW,
COLUMN,
NAMED_COLLECTION,
USER_NAME,
};
DataTypeEnum8::Values getLevelEnumValues()
@ -42,7 +41,6 @@ namespace
enum_values.emplace_back("VIEW", static_cast<Int8>(VIEW));
enum_values.emplace_back("COLUMN", static_cast<Int8>(COLUMN));
enum_values.emplace_back("NAMED_COLLECTION", static_cast<Int8>(NAMED_COLLECTION));
enum_values.emplace_back("USER_NAME", static_cast<Int8>(USER_NAME));
return enum_values;
}
}

View File

@ -35,9 +35,8 @@ static constexpr std::string_view schemata = R"(
`DEFAULT_CHARACTER_SET_SCHEMA` Nullable(String),
`DEFAULT_CHARACTER_SET_NAME` Nullable(String),
`SQL_PATH` Nullable(String)
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
name AS catalog_name,
name AS schema_name,
'default' AS schema_owner,
@ -74,9 +73,8 @@ static constexpr std::string_view tables = R"(
`DATA_LENGTH` Nullable(UInt64),
`TABLE_COLLATION` Nullable(String),
`TABLE_COMMENT` Nullable(String)
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
database AS table_catalog,
database AS table_schema,
name AS table_name,
@ -124,9 +122,8 @@ static constexpr std::string_view views = R"(
`IS_TRIGGER_UPDATABLE` Enum8('NO' = 0, 'YES' = 1),
`IS_TRIGGER_DELETABLE` Enum8('NO' = 0, 'YES' = 1),
`IS_TRIGGER_INSERTABLE_INTO` Enum8('NO' = 0, 'YES' = 1)
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
database AS table_catalog,
database AS table_schema,
name AS table_name,
@ -206,9 +203,8 @@ static constexpr std::string_view columns = R"(
`EXTRA` Nullable(String),
`COLUMN_COMMENT` String,
`COLUMN_TYPE` String
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
database AS table_catalog,
database AS table_schema,
table AS table_name,
@ -295,9 +291,8 @@ static constexpr std::string_view key_column_usage = R"(
`REFERENCED_TABLE_SCHEMA` Nullable(String),
`REFERENCED_TABLE_NAME` Nullable(String),
`REFERENCED_COLUMN_NAME` Nullable(String)
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
'def' AS constraint_catalog,
database AS constraint_schema,
'PRIMARY' AS constraint_name,
@ -351,9 +346,8 @@ static constexpr std::string_view referential_constraints = R"(
`DELETE_RULE` String,
`TABLE_NAME` String,
`REFERENCED_TABLE_NAME` String
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
'' AS constraint_catalog,
NULL AS constraint_name,
'' AS constraint_schema,
@ -418,9 +412,8 @@ static constexpr std::string_view statistics = R"(
`INDEX_COMMENT` String,
`IS_VISIBLE` String,
`EXPRESSION` Nullable(String)
)
SQL SECURITY INVOKER
AS SELECT
) AS
SELECT
'' AS table_catalog,
'' AS table_schema,
'' AS table_name,

View File

@ -88,26 +88,38 @@ void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr
if (args_func.size() != 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Table function 'merge' requires exactly 2 arguments - name "
"of source database and regexp for table names.");
"Table function 'merge' requires from 1 to 2 parameters: "
"merge(['db_name',] 'tables_regexp')");
ASTs & args = args_func.at(0)->children;
if (args.size() != 2)
if (args.size() == 1)
{
database_is_regexp = false;
source_database_name_or_regexp = context->getCurrentDatabase();
args[0] = evaluateConstantExpressionAsLiteral(args[0], context);
source_table_regexp = checkAndGetLiteralArgument<String>(args[0], "table_name_regexp");
}
else if (args.size() == 2)
{
auto [is_regexp, database_ast] = StorageMerge::evaluateDatabaseName(args[0], context);
database_is_regexp = is_regexp;
if (!is_regexp)
args[0] = database_ast;
source_database_name_or_regexp = checkAndGetLiteralArgument<String>(database_ast, "database_name");
args[1] = evaluateConstantExpressionAsLiteral(args[1], context);
source_table_regexp = checkAndGetLiteralArgument<String>(args[1], "table_name_regexp");
}
else
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Table function 'merge' requires exactly 2 arguments - name "
"of source database and regexp for table names.");
auto [is_regexp, database_ast] = StorageMerge::evaluateDatabaseName(args[0], context);
database_is_regexp = is_regexp;
if (!is_regexp)
args[0] = database_ast;
source_database_name_or_regexp = checkAndGetLiteralArgument<String>(database_ast, "database_name");
args[1] = evaluateConstantExpressionAsLiteral(args[1], context);
source_table_regexp = checkAndGetLiteralArgument<String>(args[1], "table_name_regexp");
"Table function 'merge' requires from 1 to 2 parameters: "
"merge(['db_name',] 'tables_regexp')");
}
}

View File

@ -2,7 +2,6 @@ test_build_sets_from_multiple_threads/test.py::test_set
test_concurrent_backups_s3/test.py::test_concurrent_backups
test_distributed_type_object/test.py::test_distributed_type_object
test_executable_table_function/test.py::test_executable_function_input_python
test_mask_sensitive_info/test.py::test_encryption_functions
test_merge_table_over_distributed/test.py::test_global_in
test_merge_table_over_distributed/test.py::test_select_table_name_from_merge_over_distributed
test_passing_max_partitions_to_read_remotely/test.py::test_default_database_on_cluster

View File

@ -114,6 +114,7 @@ def main():
"report.html": workspace_path / "report.html",
"core.zst": workspace_path / "core.zst",
"dmesg.log": workspace_path / "dmesg.log",
"fatal.log": workspace_path / "fatal.log",
}
compressed_server_log_path = workspace_path / "server.log.zst"

View File

@ -13,7 +13,6 @@ import sys
import os
import os.path
import glob
import platform
import signal
import re
import copy
@ -574,6 +573,27 @@ def get_localzone():
return os.getenv("TZ", "/".join(os.readlink("/etc/localtime").split("/")[-2:]))
def supports_io_uring():
    """Probe whether the server binary can read local files via io_uring.

    Runs a trivial ``file()`` query against /dev/null with
    ``--storage_file_read_method io_uring``; a zero exit status means the
    binary/kernel combination supports it. Output is discarded.
    """
    exit_status = subprocess.call(
        [
            args.binary,
            "-q",
            "select * from file('/dev/null', 'LineAsString')",
            "--storage_file_read_method",
            "io_uring",
        ],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return exit_status == 0
def get_local_filesystem_methods():
    """Return candidate values for local_filesystem_read_method.

    Always includes the portable methods; appends "io_uring" only when the
    binary reports support for it (see supports_io_uring).
    """
    base_methods = ["read", "pread", "mmap", "pread_threadpool"]
    return base_methods + (["io_uring"] if supports_io_uring() else [])
class SettingsRandomizer:
settings = {
"max_insert_threads": lambda: 0
@ -614,10 +634,7 @@ class SettingsRandomizer:
0.2, 0.5, 1, 10 * 1024 * 1024 * 1024
),
"local_filesystem_read_method": lambda: random.choice(
# Allow to use uring only when running on Linux
["read", "pread", "mmap", "pread_threadpool", "io_uring"]
if platform.system().lower() == "linux"
else ["read", "pread", "mmap", "pread_threadpool"]
get_local_filesystem_methods()
),
"remote_filesystem_read_method": lambda: random.choice(["read", "threadpool"]),
"local_filesystem_read_prefetch": lambda: random.randint(0, 1),

View File

@ -0,0 +1,5 @@
<clickhouse>
<!-- Upper bound (in ms) on how long the exponential backoff policy may
     postpone retrying a failed mutation. Kept short (200 ms) so functional
     tests observe retries quickly. -->
<merge_tree>
<max_postpone_time_for_failed_mutations_ms>200</max_postpone_time_for_failed_mutations_ms>
</merge_tree>
</clickhouse>

View File

@ -30,6 +30,7 @@ ln -sf $SRC_PATH/config.d/graphite_alternative.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/database_atomic.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/max_concurrent_queries.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/merge_tree_settings.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/backoff_failed_mutation.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/merge_tree_old_dirs_cleanup.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/test_cluster_with_incorrect_pw.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/keeper_port.xml $DEST_SERVER_PATH/config.d/

View File

@ -104,8 +104,8 @@ def run_and_check(
res = subprocess.run(
args, stdout=stdout, stderr=stderr, env=env, shell=shell, timeout=timeout
)
out = res.stdout.decode("utf-8")
err = res.stderr.decode("utf-8")
out = res.stdout.decode("utf-8", "ignore")
err = res.stderr.decode("utf-8", "ignore")
# check_call(...) from subprocess does not print stderr, so we do it manually
for outline in out.splitlines():
logging.debug(f"Stdout:{outline}")

View File

@ -0,0 +1,5 @@
<clickhouse>
<!-- Enable mutation backoff: a failing mutation may be postponed for up to
     60 s between retries (exponential backoff). -->
<merge_tree>
<max_postpone_time_for_failed_mutations_ms>60000</max_postpone_time_for_failed_mutations_ms>
</merge_tree>
</clickhouse>

View File

@ -0,0 +1,5 @@
<clickhouse>
<!-- 0 disables the failed-mutation backoff entirely: failed mutations are
     retried without any postponement. -->
<merge_tree>
<max_postpone_time_for_failed_mutations_ms>0</max_postpone_time_for_failed_mutations_ms>
</merge_tree>
</clickhouse>

View File

@ -0,0 +1,201 @@
import time
import pytest
from helpers.cluster import ClickHouseCluster
# Two-node integration cluster: one node with the mutation-failure backoff
# enabled (60 s max postpone, see its config), one with backoff disabled (0 ms).
cluster = ClickHouseCluster(__file__)

node_with_backoff = cluster.add_instance(
    "node_with_backoff",
    macros={"cluster": "test_cluster"},
    main_configs=["configs/config.d/backoff_mutation_policy.xml"],
    with_zookeeper=True,
    stay_alive=True,  # needed by the restart test below
)

node_no_backoff = cluster.add_instance(
    "node_no_backoff",
    macros={"cluster": "test_cluster"},
    main_configs=["configs/config.d/no_backoff_mutation_policy.xml"],
    with_zookeeper=True,
)

# Server log fragments emitted when the backoff policy postpones a mutation.
# NOTE(review): "POSPONE" is a typo for "POSTPONE", but the names are used
# consistently across this file; the string values themselves must match the
# exact server-side log messages and cannot be reworded here.
REPLICATED_POSPONE_MUTATION_LOG = (
    "According to exponential backoff policy, put aside this log entry"
)
POSPONE_MUTATION_LOG = (
    "According to exponential backoff policy, do not perform mutations for the part"
)

all_nodes = [node_with_backoff, node_no_backoff]
def prepare_cluster(use_replicated_table):
    """Recreate `test_mutations` on every node and seed it with 10 rows.

    The table engine is ReplicatedMergeTree or plain MergeTree depending on
    *use_replicated_table*; logs are rotated so each test starts with a clean
    log to grep.
    """
    for instance in all_nodes:
        instance.query("DROP TABLE IF EXISTS test_mutations SYNC")

    if use_replicated_table:
        engine = "ReplicatedMergeTree('/clickhouse/{cluster}/tables/test/test_mutations', '{instance}')"
    else:
        engine = "MergeTree()"

    for instance in all_nodes:
        instance.rotate_logs()
        instance.query(f"CREATE TABLE test_mutations(x UInt32) ENGINE {engine} ORDER BY x")
        instance.query("INSERT INTO test_mutations SELECT * FROM system.numbers LIMIT 10")
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once, always shut it down."""
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()
@pytest.mark.parametrize(
    ("node, found_in_log"),
    [
        (
            node_with_backoff,
            True,
        ),
        (
            node_no_backoff,
            False,
        ),
    ],
)
def test_exponential_backoff_with_merge_tree(started_cluster, node, found_in_log):
    """A failing mutation on plain MergeTree is postponed only on the backoff node."""
    prepare_cluster(False)

    # Run a mutation that is guaranteed to fail (subquery on a missing table).
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
    )

    # The postpone message must appear only on the node with backoff enabled.
    assert node.contains_in_log(POSPONE_MUTATION_LOG) == found_in_log

    node.rotate_logs()
    node.query("KILL MUTATION WHERE table='test_mutations'")
    # Killing resets the backoff state; a *new* failing mutation must still be
    # postponed (again, only on the backoff node).
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
    )
    assert node.contains_in_log(POSPONE_MUTATION_LOG) == found_in_log
def test_exponential_backoff_with_replicated_tree(started_cluster):
    """On ReplicatedMergeTree the replicated postpone message appears only on the backoff node."""
    prepare_cluster(True)

    # Mutation fails on both replicas (missing table in subquery).
    node_with_backoff.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1"
    )

    # Backoff node postpones the queue entry; the no-backoff node never does.
    assert node_with_backoff.wait_for_log_line(REPLICATED_POSPONE_MUTATION_LOG)
    assert not node_no_backoff.contains_in_log(REPLICATED_POSPONE_MUTATION_LOG)
@pytest.mark.parametrize(
    ("node"),
    [
        (node_with_backoff),
    ],
)
def test_exponential_backoff_create_dependent_table(started_cluster, node):
    """Once the missing dependency appears, the postponed mutation eventually finishes."""
    prepare_cluster(False)

    # Run a mutation that fails because dep_table does not exist yet.
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
    )

    # Create the dependency; the backed-off mutation should now succeed on retry.
    node.query("CREATE TABLE dep_table(x UInt32) ENGINE MergeTree() ORDER BY x")

    # Poll until no unfinished mutation remains. The retry is subject to the
    # backoff postpone (up to 60 s per this node's config), so sleep between
    # polls instead of issuing 100 back-to-back queries that would all complete
    # within milliseconds and spuriously fail the test.
    retry_count = 100
    no_unfinished_mutation = False
    for _ in range(0, retry_count):
        if node.query("SELECT count() FROM system.mutations WHERE is_done=0") == "0\n":
            no_unfinished_mutation = True
            break
        time.sleep(0.5)

    assert no_unfinished_mutation
    node.query("DROP TABLE IF EXISTS dep_table SYNC")
def test_exponential_backoff_setting_override(started_cluster):
    """A per-table setting of 0 disables backoff even on the backoff-enabled node."""
    node = node_with_backoff
    node.rotate_logs()

    node.query("DROP TABLE IF EXISTS test_mutations SYNC")
    # Table-level max_postpone_time_for_failed_mutations_ms=0 overrides the
    # server-level 60000 ms config.
    node.query(
        "CREATE TABLE test_mutations(x UInt32) ENGINE=MergeTree() ORDER BY x SETTINGS max_postpone_time_for_failed_mutations_ms=0"
    )
    node.query("INSERT INTO test_mutations SELECT * FROM system.numbers LIMIT 10")

    # Run a mutation that fails (dep_table does not exist).
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
    )
    # With the override in place no postpone message may be logged.
    assert not node.contains_in_log(POSPONE_MUTATION_LOG)
@pytest.mark.parametrize(
    ("replicated_table"),
    [
        (False),
        (True),
    ],
)
def test_backoff_clickhouse_restart(started_cluster, replicated_table):
    """Backoff keeps postponing the failed mutation after a server restart."""
    prepare_cluster(replicated_table)
    node = node_with_backoff

    # Run a mutation that fails (dep_table does not exist).
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
    )
    assert node.wait_for_log_line(
        REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
    )

    node.restart_clickhouse()
    # NOTE(review): logs are rotated *after* the restart, so the wait below
    # must match a postpone message emitted post-rotation — confirm this does
    # not race with an early retry logged before rotate_logs() runs.
    node.rotate_logs()
    assert node.wait_for_log_line(
        REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
    )
@pytest.mark.parametrize(
    ("replicated_table"),
    [
        (False),
        (True),
    ],
)
def test_no_backoff_after_killing_mutation(started_cluster, replicated_table):
    """Killing the failed mutation resets backoff; remaining mutations are not postponed."""
    prepare_cluster(replicated_table)

    node = node_with_backoff
    # Run a mutation that fails (dep_table does not exist).
    node.query(
        "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM dep_table) SETTINGS allow_nondeterministic_mutations=1"
    )
    # And one that is valid and should proceed once backoff is cleared.
    node.query("ALTER TABLE test_mutations DELETE WHERE x=1")

    assert node.wait_for_log_line(
        REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
    )
    # NOTE(review): assumes the first row of system.mutations is the failing
    # mutation (insertion order) — confirm ordering is guaranteed here.
    mutation_ids = node.query("select mutation_id from system.mutations").split()

    node.query(
        f"KILL MUTATION WHERE table = 'test_mutations' AND mutation_id = '{mutation_ids[0]}'"
    )
    node.rotate_logs()
    # After the kill, no further postpone messages may appear.
    assert not node.contains_in_log(
        REPLICATED_POSPONE_MUTATION_LOG if replicated_table else POSPONE_MUTATION_LOG
    )

View File

@ -723,7 +723,6 @@ def test_materialized_view(started_cluster):
pg_manager.execute(f"INSERT INTO test_table SELECT 3, 4")
check_tables_are_synchronized(instance, "test_table")
assert "1\t2\n3\t4" == instance.query("SELECT * FROM mv ORDER BY 1, 2").strip()
instance.query("DROP VIEW mv")
pg_manager.drop_materialized_db()

Some files were not shown because too many files have changed in this diff Show More