Merge remote-tracking branch 'rschu1ze/master' into annoy_cleanup
commit 52e265badd
5 .github/workflows/pull_request.yml (vendored)
@ -46,7 +46,12 @@ jobs:
      - name: Python unit tests
        run: |
          cd "$GITHUB_WORKSPACE/tests/ci"
          echo "Testing the main ci directory"
          python3 -m unittest discover -s . -p '*_test.py'
          for dir in *_lambda/; do
            echo "Testing $dir"
            python3 -m unittest discover -s "$dir" -p '*_test.py'
          done
  DockerHubPushAarch64:
    needs: CheckLabels
    runs-on: [self-hosted, style-checker-aarch64]

@ -626,7 +626,9 @@ if args.report == "main":
            message_array.append(str(faster_queries) + " faster")

        if slower_queries:
            if slower_queries > 3:
            # This threshold should be synchronized with the value in https://github.com/ClickHouse/ClickHouse/blob/master/tests/ci/performance_comparison_check.py#L225
            # False positives rate should be < 1%: https://shorturl.at/CDEK8
            if slower_queries > 5:
                status = "failure"
            message_array.append(str(slower_queries) + " slower")

@ -42,7 +42,6 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
    [rabbitmq_queue_consume = false,]
    [rabbitmq_address = '',]
    [rabbitmq_vhost = '/',]
    [rabbitmq_queue_consume = false,]
    [rabbitmq_username = '',]
    [rabbitmq_password = '',]
    [rabbitmq_commit_on_select = false,]

@ -109,7 +109,7 @@ INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
VALUES (1667446031, 1, 6, 3)
```

The data are inserted in both the table and the materialized view `test.mv_visits`.
The data is inserted in both the table and the materialized view `test.mv_visits`.

To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from the materialized view `test.mv_visits`:
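
For illustration, such a query could look like the sketch below. The column names follow the `INSERT` above, but the exact aggregate functions depend on how `test.mv_visits` is defined (if the view stores aggregate-function states, the `-Merge` combinators such as `sumMerge` would be used instead):

```sql
-- Illustrative only: assumes the view exposes StartDate, Sign and UserID.
SELECT
    StartDate,
    sum(Sign)    AS visits,
    uniq(UserID) AS users
FROM test.mv_visits
GROUP BY StartDate
ORDER BY StartDate;
```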

@ -15,6 +15,18 @@ tokenized cells of the string column. For example, the string cell "I will be a
" wi", "wil", "ill", "ll ", "l b", " be" etc. The more fine-granular the input strings are tokenized, the bigger but also the more
useful the resulting inverted index will be.
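
As a rough sketch of the syntax this page describes (the table and column names here are made up, and the exact DDL may differ between versions), an inverted index with 3-gram tokenization could be declared roughly like this:

```sql
-- Sketch, not taken from this page: the feature is experimental and must be enabled first.
SET allow_experimental_inverted_index = 1;

CREATE TABLE docs
(
    key UInt64,
    str String,
    INDEX inv_idx(str) TYPE inverted(3) GRANULARITY 1  -- tokenize str into 3-grams
)
ENGINE = MergeTree
ORDER BY key;

-- Token-based filters such as the following can then use the index:
SELECT count() FROM docs WHERE hasToken(str, 'Wimbledon');
```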

<div class='vimeo-container'>
  <iframe src="//www.youtube.com/embed/O_MnyUkrIq8"
    width="640"
    height="360"
    frameborder="0"
    allow="autoplay;
    fullscreen;
    picture-in-picture"
    allowfullscreen>
  </iframe>
</div>

:::note
Inverted indexes are experimental and should not be used in production environments yet. They may change in the future in backward-incompatible
ways, for example with respect to their DDL/DQL syntax or performance/compression characteristics.

@ -75,7 +75,7 @@ SELECT
    payment_type,
    pickup_ntaname,
    dropoff_ntaname
FROM s3(
FROM gcs(
    'https://storage.googleapis.com/clickhouse-public-datasets/nyc-taxi/trips_{0..2}.gz',
    'TabSeparatedWithNames'
);
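
As a quick check of the `gcs` form, one could count the rows straight from the bucket. This query is an illustration added here, not part of the original page; it reuses the same public dataset URL:

```sql
SELECT count()
FROM gcs(
    'https://storage.googleapis.com/clickhouse-public-datasets/nyc-taxi/trips_{0..2}.gz',
    'TabSeparatedWithNames'
);
```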

@ -10,14 +10,14 @@ Columns:
- `user` (String) – The user who made the query. Keep in mind that for distributed processing, queries are sent to remote servers under the `default` user. The field contains the username for a specific query, not for a query that this query initiated.
- `address` (String) – The IP address the request was made from. The same for distributed processing. To track where a distributed query was originally made from, look at `system.processes` on the query requestor server.
- `elapsed` (Float64) – The time in seconds since request execution started.
- `rows_read` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `bytes_read` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `read_rows` (UInt64) – The number of rows read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `read_bytes` (UInt64) – The number of uncompressed bytes read from the table. For distributed processing, on the requestor server, this is the total for all remote servers.
- `total_rows_approx` (UInt64) – The approximation of the total number of rows that should be read. For distributed processing, on the requestor server, this is the total for all remote servers. It can be updated during request processing, when new sources to process become known.
- `memory_usage` (UInt64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
- `memory_usage` (Int64) – Amount of RAM the request uses. It might not include some types of dedicated memory. See the [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
- `query` (String) – The query text. For `INSERT`, it does not include the data to insert.
- `query_id` (String) – Query ID, if defined.
- `is_cancelled` (Int8) – Was the query cancelled.
- `is_all_data_sent` (Int8) – Was all data sent to the client (in other words, the query has finished on the server).
- `is_cancelled` (UInt8) – Was the query cancelled.
- `is_all_data_sent` (UInt8) – Was all data sent to the client (in other words, the query has finished on the server).

```sql
SELECT * FROM system.processes LIMIT 10 FORMAT Vertical;

28 docs/en/operations/system-tables/user_processes.md (Normal file)
@ -0,0 +1,28 @@
---
slug: /en/operations/system-tables/user_processes
---
# user_processes

This system table can be used to get an overview of memory usage and ProfileEvents of users.

Columns:

- `user` ([String](../../sql-reference/data-types/string.md)) — User name.
- `memory_usage` ([Int64](../../sql-reference/data-types/int-uint#int-ranges)) – Sum of RAM used by all processes of the user. It might not include some types of dedicated memory. See the [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage) setting.
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint#int-ranges)) — Peak memory usage of the user. It can be reset when no queries are run for the user.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map)) – Summary of ProfileEvents that measure different metrics for the user. Their descriptions can be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events).

```sql
SELECT * FROM system.user_processes LIMIT 10 FORMAT Vertical;
```

```response
Row 1:
──────
user:              default
memory_usage:      9832
peak_memory_usage: 9832
ProfileEvents:     {'Query':5,'SelectQuery':5,'QueriesWithSubqueries':38,'SelectQueriesWithSubqueries':38,'QueryTimeMicroseconds':842048,'SelectQueryTimeMicroseconds':842048,'ReadBufferFromFileDescriptorRead':6,'ReadBufferFromFileDescriptorReadBytes':234,'IOBufferAllocs':3,'IOBufferAllocBytes':98493,'ArenaAllocChunks':283,'ArenaAllocBytes':1482752,'FunctionExecute':670,'TableFunctionExecute':16,'DiskReadElapsedMicroseconds':19,'NetworkSendElapsedMicroseconds':684,'NetworkSendBytes':139498,'SelectedRows':6076,'SelectedBytes':685802,'ContextLock':1140,'RWLockAcquiredReadLocks':193,'RWLockReadersWaitMilliseconds':4,'RealTimeMicroseconds':1585163,'UserTimeMicroseconds':889767,'SystemTimeMicroseconds':13630,'SoftPageFaults':1947,'OSCPUWaitMicroseconds':6,'OSCPUVirtualTimeMicroseconds':903251,'OSReadChars':28631,'OSWriteChars':28888,'QueryProfilerRuns':3,'LogTrace':79,'LogDebug':24}

1 row in set. Elapsed: 0.010 sec.
```
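
Since `ProfileEvents` is a `Map(String, UInt64)`, individual counters can be read by key. A small illustrative query (the event names come from the sample output above):

```sql
SELECT
    user,
    ProfileEvents['SelectQuery']          AS select_queries,
    formatReadableSize(memory_usage)      AS memory,
    formatReadableSize(peak_memory_usage) AS peak_memory
FROM system.user_processes;
```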

@ -2280,7 +2280,7 @@ This config consists of a list of regular expression tree nodes. Each node has t
- The value of an attribute may contain **back references**, referring to capture groups of the matched regular expression. In the example, the value of attribute `version` in the first node consists of a back-reference `\1` to capture group `(\d+[\.\d]*)` in the regular expression. Back-reference numbers range from 1 to 9 and are written as `$1` or `\1` (for number 1). The back reference is replaced by the matched capture group during query execution.
- **child nodes**: a list of children of a regexp tree node, each of which has its own attributes and (potentially) children nodes. String matching proceeds in a depth-first fashion. If a string matches a regexp node, the dictionary checks if it also matches the nodes' child nodes. If that is the case, the attributes of the deepest matching node are assigned. Attributes of a child node overwrite equally named attributes of parent nodes. The name of child nodes in YAML files can be arbitrary, e.g. `versions` in above example.

Regexp tree dictionaries only allow access using the functions `dictGet` and `dictGetOrDefault`.
Regexp tree dictionaries only allow access using the functions `dictGet`, `dictGetOrDefault`, and `dictGetAll`.

Example:

@ -2300,6 +2300,67 @@ In this case, we first match the regular expression `\d+/tclwebkit(?:\d+[\.\d]*)

With a powerful YAML configuration file, we can use a regexp tree dictionary as a user agent string parser. We support [uap-core](https://github.com/ua-parser/uap-core) and demonstrate how to use it in the functional test [02504_regexp_dictionary_ua_parser](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh).

#### Collecting Attribute Values

Sometimes it is useful to return values from multiple regular expressions that matched, rather than just the value of a leaf node. In these cases, the specialized [`dictGetAll`](../../sql-reference/functions/ext-dict-functions.md#dictgetall) function can be used. If a node has an attribute value of type `T`, `dictGetAll` will return an `Array(T)` containing zero or more values.

By default, the number of matches returned per key is unbounded. A bound can be passed as an optional fourth argument to `dictGetAll`. The array is populated in _topological order_, meaning that child nodes come before parent nodes, and sibling nodes follow the ordering in the source.

Example:

```sql
CREATE DICTIONARY regexp_dict
(
    regexp String,
    tag String,
    topological_index Int64,
    captured Nullable(String),
    parent String
)
PRIMARY KEY(regexp)
SOURCE(YAMLRegExpTree(PATH '/var/lib/clickhouse/user_files/regexp_tree.yaml'))
LAYOUT(regexp_tree)
LIFETIME(0)
```

```yaml
# /var/lib/clickhouse/user_files/regexp_tree.yaml
- regexp: 'clickhouse\.com'
  tag: 'ClickHouse'
  topological_index: 1
  paths:
    - regexp: 'clickhouse\.com/docs(.*)'
      tag: 'ClickHouse Documentation'
      topological_index: 0
      captured: '\1'
      parent: 'ClickHouse'

- regexp: '/docs(/|$)'
  tag: 'Documentation'
  topological_index: 2

- regexp: 'github.com'
  tag: 'GitHub'
  topological_index: 3
  captured: 'NULL'
```

```sql
CREATE TABLE urls (url String) ENGINE=MergeTree ORDER BY url;
INSERT INTO urls VALUES ('clickhouse.com'), ('clickhouse.com/docs/en'), ('github.com/clickhouse/tree/master/docs');
SELECT url, dictGetAll('regexp_dict', ('tag', 'topological_index', 'captured', 'parent'), url, 2) FROM urls;
```

Result:

```text
┌─url────────────────────────────────────┬─dictGetAll('regexp_dict', ('tag', 'topological_index', 'captured', 'parent'), url, 2)─┐
│ clickhouse.com                         │ (['ClickHouse'],[1],[],[])                                                             │
│ clickhouse.com/docs/en                 │ (['ClickHouse Documentation','ClickHouse'],[0,1],['/en'],['ClickHouse'])               │
│ github.com/clickhouse/tree/master/docs │ (['Documentation','GitHub'],[2,3],[NULL],[])                                           │
└────────────────────────────────────────┴────────────────────────────────────────────────────────────────────────────────────────┘
```

### Use Regular Expression Tree Dictionary in ClickHouse Cloud

The `YAMLRegExpTree` source used above works in ClickHouse Open Source but not in ClickHouse Cloud. To use regexp tree dictionaries in ClickHouse Cloud, first create a regexp tree dictionary from a YAML file locally in ClickHouse Open Source, then dump this dictionary into a CSV file using the `dictionary` table function and the [INTO OUTFILE](../statements/select/into-outfile.md) clause.
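
A sketch of that export step (the dictionary and file names are placeholders):

```sql
-- Run locally in ClickHouse Open Source after the dictionary has been created
-- from the YAML file; the resulting CSV can then be loaded into ClickHouse Cloud
-- and used as the dictionary source there.
SELECT * FROM dictionary(regexp_dict)
INTO OUTFILE 'regexp_dict.csv'
FORMAT CSV;
```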

@ -403,6 +403,84 @@ SELECT dictGetDescendants('hierarchy_flat_dictionary', number, 1) FROM system.nu
└────────────────────────────────────────────────────────────┘
```

## dictGetAll

Retrieves the attribute values of all nodes that matched each key in a [regular expression tree dictionary](../../sql-reference/dictionaries/index.md#regexp-tree-dictionary).

Besides returning values of type `Array(T)` instead of `T`, this function behaves similarly to [`dictGet`](#dictget-dictgetordefault-dictgetornull).

**Syntax**

``` sql
dictGetAll('dict_name', attr_names, id_expr[, limit])
```

**Arguments**

- `dict_name` — Name of the dictionary. [String literal](../../sql-reference/syntax.md#syntax-string-literal).
- `attr_names` — Name of the column of the dictionary, [String literal](../../sql-reference/syntax.md#syntax-string-literal), or tuple of column names, [Tuple](../../sql-reference/data-types/tuple.md)([String literal](../../sql-reference/syntax.md#syntax-string-literal)).
- `id_expr` — Key value. [Expression](../../sql-reference/syntax.md#syntax-expressions) returning array of dictionary key-type value or [Tuple](../../sql-reference/data-types/tuple.md)-type value depending on the dictionary configuration.
- `limit` — Maximum length for each value array returned. When truncating, child nodes are given precedence over parent nodes, and otherwise the defined list order for the regexp tree dictionary is respected. If unspecified, array length is unlimited.

**Returned value**

- If ClickHouse parses the attribute successfully in the attribute's data type as defined in the dictionary, returns an array of dictionary attribute values that correspond to `id_expr` for each attribute specified by `attr_names`.

- If there is no key corresponding to `id_expr` in the dictionary, then an empty array is returned.

ClickHouse throws an exception if it cannot parse the value of the attribute or the value does not match the attribute data type.

**Example**

Consider the following regexp tree dictionary:

```sql
CREATE DICTIONARY regexp_dict
(
    regexp String,
    tag String
)
PRIMARY KEY(regexp)
SOURCE(YAMLRegExpTree(PATH '/var/lib/clickhouse/user_files/regexp_tree.yaml'))
LAYOUT(regexp_tree)
...
```

```yaml
# /var/lib/clickhouse/user_files/regexp_tree.yaml
- regexp: 'foo'
  tag: 'foo_attr'
- regexp: 'bar'
  tag: 'bar_attr'
- regexp: 'baz'
  tag: 'baz_attr'
```

Get all matching values:

```sql
SELECT dictGetAll('regexp_dict', 'tag', 'foobarbaz');
```

```text
┌─dictGetAll('regexp_dict', 'tag', 'foobarbaz')─┐
│ ['foo_attr','bar_attr','baz_attr']            │
└───────────────────────────────────────────────┘
```

Get up to 2 matching values:

```sql
SELECT dictGetAll('regexp_dict', 'tag', 'foobarbaz', 2);
```

```text
┌─dictGetAll('regexp_dict', 'tag', 'foobarbaz', 2)─┐
│ ['foo_attr','bar_attr']                          │
└──────────────────────────────────────────────────┘
```

## Other Functions

ClickHouse supports specialized functions that convert dictionary attribute values to a specific data type regardless of the dictionary configuration.

@ -10,7 +10,9 @@ There are at least\* two types of functions - regular functions (they are just c

In this section we discuss regular functions. For aggregate functions, see the section “Aggregate functions”.

\* - There is a third type of function that the ‘arrayJoin’ function belongs to; table functions can also be mentioned separately.\*
:::note
There is a third type of function that the [‘arrayJoin’ function](/docs/en/sql-reference/functions/array-join.md) belongs to. And [table functions](/docs/en/sql-reference/table-functions/index.md) can also be mentioned separately.
:::
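
For context, `arrayJoin` is special because a single input row can produce several output rows, which a regular function cannot do. A minimal illustration (added here, not part of the original page):

```sql
SELECT arrayJoin([1, 2, 3]) AS value;
-- Returns three rows: 1, 2 and 3.
```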

## Strong Typing
@ -130,15 +130,31 @@ void LocalServer::initialize(Poco::Util::Application & self)
|
||||
});
|
||||
#endif
|
||||
|
||||
IOThreadPool::initialize(
|
||||
getIOThreadPool().initialize(
|
||||
config().getUInt("max_io_thread_pool_size", 100),
|
||||
config().getUInt("max_io_thread_pool_free_size", 0),
|
||||
config().getUInt("io_thread_pool_queue_size", 10000));
|
||||
|
||||
OutdatedPartsLoadingThreadPool::initialize(
|
||||
config().getUInt("max_outdated_parts_loading_thread_pool_size", 16),
|
||||
|
||||
const size_t active_parts_loading_threads = config().getUInt("max_active_parts_loading_thread_pool_size", 64);
|
||||
getActivePartsLoadingThreadPool().initialize(
|
||||
active_parts_loading_threads,
|
||||
0, // We don't need any threads one all the parts will be loaded
|
||||
config().getUInt("max_outdated_parts_loading_thread_pool_size", 16));
|
||||
active_parts_loading_threads);
|
||||
|
||||
const size_t outdated_parts_loading_threads = config().getUInt("max_outdated_parts_loading_thread_pool_size", 32);
|
||||
getOutdatedPartsLoadingThreadPool().initialize(
|
||||
outdated_parts_loading_threads,
|
||||
0, // We don't need any threads one all the parts will be loaded
|
||||
outdated_parts_loading_threads);
|
||||
|
||||
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(active_parts_loading_threads);
|
||||
|
||||
const size_t cleanup_threads = config().getUInt("max_parts_cleaning_thread_pool_size", 128);
|
||||
getPartsCleaningThreadPool().initialize(
|
||||
cleanup_threads,
|
||||
0, // We don't need any threads one all the parts will be deleted
|
||||
cleanup_threads);
|
||||
}
|
||||
|
||||
|
||||
|
@ -683,21 +683,36 @@ try
|
||||
});
|
||||
#endif
|
||||
|
||||
IOThreadPool::initialize(
|
||||
getIOThreadPool().initialize(
|
||||
server_settings.max_io_thread_pool_size,
|
||||
server_settings.max_io_thread_pool_free_size,
|
||||
server_settings.io_thread_pool_queue_size);
|
||||
|
||||
BackupsIOThreadPool::initialize(
|
||||
getBackupsIOThreadPool().initialize(
|
||||
server_settings.max_backups_io_thread_pool_size,
|
||||
server_settings.max_backups_io_thread_pool_free_size,
|
||||
server_settings.backups_io_thread_pool_queue_size);
|
||||
|
||||
OutdatedPartsLoadingThreadPool::initialize(
|
||||
getActivePartsLoadingThreadPool().initialize(
|
||||
server_settings.max_active_parts_loading_thread_pool_size,
|
||||
0, // We don't need any threads once all the parts will be loaded
|
||||
server_settings.max_active_parts_loading_thread_pool_size);
|
||||
|
||||
getOutdatedPartsLoadingThreadPool().initialize(
|
||||
server_settings.max_outdated_parts_loading_thread_pool_size,
|
||||
0, // We don't need any threads one all the parts will be loaded
|
||||
0, // We don't need any threads once all the parts will be loaded
|
||||
server_settings.max_outdated_parts_loading_thread_pool_size);
|
||||
|
||||
/// It could grow if we need to synchronously wait until all the data parts will be loaded.
|
||||
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
|
||||
server_settings.max_active_parts_loading_thread_pool_size
|
||||
);
|
||||
|
||||
getPartsCleaningThreadPool().initialize(
|
||||
server_settings.max_parts_cleaning_thread_pool_size,
|
||||
0, // We don't need any threads one all the parts will be deleted
|
||||
server_settings.max_parts_cleaning_thread_pool_size);
|
||||
|
||||
/// Initialize global local cache for remote filesystem.
|
||||
if (config().has("local_cache_for_remote_fs"))
|
||||
{
|
||||
@ -1226,6 +1241,36 @@ try
|
||||
global_context->getMessageBrokerSchedulePool().increaseThreadsCount(server_settings_.background_message_broker_schedule_pool_size);
|
||||
global_context->getDistributedSchedulePool().increaseThreadsCount(server_settings_.background_distributed_schedule_pool_size);
|
||||
|
||||
getIOThreadPool().reloadConfiguration(
|
||||
server_settings.max_io_thread_pool_size,
|
||||
server_settings.max_io_thread_pool_free_size,
|
||||
server_settings.io_thread_pool_queue_size);
|
||||
|
||||
getBackupsIOThreadPool().reloadConfiguration(
|
||||
server_settings.max_backups_io_thread_pool_size,
|
||||
server_settings.max_backups_io_thread_pool_free_size,
|
||||
server_settings.backups_io_thread_pool_queue_size);
|
||||
|
||||
getActivePartsLoadingThreadPool().reloadConfiguration(
|
||||
server_settings.max_active_parts_loading_thread_pool_size,
|
||||
0, // We don't need any threads once all the parts will be loaded
|
||||
server_settings.max_active_parts_loading_thread_pool_size);
|
||||
|
||||
getOutdatedPartsLoadingThreadPool().reloadConfiguration(
|
||||
server_settings.max_outdated_parts_loading_thread_pool_size,
|
||||
0, // We don't need any threads once all the parts will be loaded
|
||||
server_settings.max_outdated_parts_loading_thread_pool_size);
|
||||
|
||||
/// It could grow if we need to synchronously wait until all the data parts will be loaded.
|
||||
getOutdatedPartsLoadingThreadPool().setMaxTurboThreads(
|
||||
server_settings.max_active_parts_loading_thread_pool_size
|
||||
);
|
||||
|
||||
getPartsCleaningThreadPool().reloadConfiguration(
|
||||
server_settings.max_parts_cleaning_thread_pool_size,
|
||||
0, // We don't need any threads one all the parts will be deleted
|
||||
server_settings.max_parts_cleaning_thread_pool_size);
|
||||
|
||||
if (config->has("resources"))
|
||||
{
|
||||
global_context->getResourceManager()->updateConfiguration(*config);
|
||||
|
@ -161,7 +161,7 @@ void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_s
|
||||
/* dest_key= */ blob_path[0],
|
||||
request_settings,
|
||||
object_attributes,
|
||||
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupReaderS3"),
|
||||
threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupReaderS3"),
|
||||
/* for_disk_s3= */ true);
|
||||
|
||||
return file_size;
|
||||
@ -212,7 +212,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
|
||||
fs::path(s3_uri.key) / path_in_backup,
|
||||
request_settings,
|
||||
{},
|
||||
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
|
||||
threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWriterS3"));
|
||||
return; /// copied!
|
||||
}
|
||||
}
|
||||
@ -224,7 +224,7 @@ void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src
|
||||
void BackupWriterS3::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
|
||||
{
|
||||
copyDataToS3File(create_read_buffer, start_pos, length, client, s3_uri.bucket, fs::path(s3_uri.key) / path_in_backup, request_settings, {},
|
||||
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
|
||||
threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWriterS3"));
|
||||
}
|
||||
|
||||
BackupWriterS3::~BackupWriterS3() = default;
|
||||
@ -258,7 +258,7 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
|
||||
DBMS_DEFAULT_BUFFER_SIZE,
|
||||
request_settings,
|
||||
std::nullopt,
|
||||
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"),
|
||||
threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWriterS3"),
|
||||
write_settings);
|
||||
}
|
||||
|
||||
|
@ -278,7 +278,7 @@ public:
|
||||
static Int32 cancelled_status() { return exit_after_signals.load(); }
|
||||
};
|
||||
|
||||
/// This signal handler is set only for SIGINT.
|
||||
/// This signal handler is set for SIGINT and SIGQUIT.
|
||||
void interruptSignalHandler(int signum)
|
||||
{
|
||||
if (QueryInterruptHandler::try_stop())
|
||||
@ -317,6 +317,9 @@ void ClientBase::setupSignalHandler()
|
||||
|
||||
if (sigaction(SIGINT, &new_act, nullptr))
|
||||
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
|
||||
|
||||
if (sigaction(SIGQUIT, &new_act, nullptr))
|
||||
throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
|
||||
}
|
||||
|
||||
|
||||
|
@ -131,8 +131,14 @@
|
||||
M(DistributedInsertThreadsActive, "Number of threads used for INSERT into Distributed running a task.") \
|
||||
M(StorageS3Threads, "Number of threads in the StorageS3 thread pool.") \
|
||||
M(StorageS3ThreadsActive, "Number of threads in the StorageS3 thread pool running a task.") \
|
||||
M(ObjectStorageS3Threads, "Number of threads in the S3ObjectStorage thread pool.") \
|
||||
M(ObjectStorageS3ThreadsActive, "Number of threads in the S3ObjectStorage thread pool running a task.") \
|
||||
M(ObjectStorageAzureThreads, "Number of threads in the AzureObjectStorage thread pool.") \
|
||||
M(ObjectStorageAzureThreadsActive, "Number of threads in the AzureObjectStorage thread pool running a task.") \
|
||||
M(MergeTreePartsLoaderThreads, "Number of threads in the MergeTree parts loader thread pool.") \
|
||||
M(MergeTreePartsLoaderThreadsActive, "Number of threads in the MergeTree parts loader thread pool running a task.") \
|
||||
M(MergeTreeOutdatedPartsLoaderThreads, "Number of threads in the threadpool for loading Outdated data parts.") \
|
||||
M(MergeTreeOutdatedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Outdated data parts.") \
|
||||
M(MergeTreePartsCleanerThreads, "Number of threads in the MergeTree parts cleaner thread pool.") \
|
||||
M(MergeTreePartsCleanerThreadsActive, "Number of threads in the MergeTree parts cleaner thread pool running a task.") \
|
||||
M(SystemReplicasThreads, "Number of threads in the system.replicas thread pool.") \
|
||||
|
@ -21,7 +21,9 @@ namespace DB
|
||||
M(UInt64, max_io_thread_pool_size, 100, "The maximum number of threads that would be used for IO operations", 0) \
|
||||
M(UInt64, max_io_thread_pool_free_size, 0, "Max free size for IO thread pool.", 0) \
|
||||
M(UInt64, io_thread_pool_queue_size, 10000, "Queue size for IO thread pool.", 0) \
|
||||
M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The maximum number of threads that would be used for loading outdated data parts on startup", 0) \
|
||||
M(UInt64, max_active_parts_loading_thread_pool_size, 64, "The number of threads to load active set of data parts (Active ones) at startup.", 0) \
|
||||
M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The number of threads to load inactive set of data parts (Outdated ones) at startup.", 0) \
|
||||
M(UInt64, max_parts_cleaning_thread_pool_size, 128, "The number of threads for concurrent removal of inactive data parts.", 0) \
|
||||
M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
|
||||
M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
|
||||
M(UInt64, max_remote_read_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for read. Zero means unlimited.", 0) \
|
||||
|
@ -51,19 +51,11 @@ void SerializationUUID::deserializeTextQuoted(IColumn & column, ReadBuffer & ist
|
||||
{
|
||||
assertChar('\'', istr);
|
||||
char * next_pos = find_first_symbols<'\\', '\''>(istr.position(), istr.buffer().end());
|
||||
size_t len = next_pos - istr.position();
|
||||
if ((len == 32) && (istr.position()[32] == '\''))
|
||||
const size_t len = next_pos - istr.position();
|
||||
if ((len == 32 || len == 36) && istr.position()[len] == '\'')
|
||||
{
|
||||
parseUUIDWithoutSeparator(
|
||||
reinterpret_cast<const UInt8 *>(istr.position()), std::reverse_iterator<UInt8 *>(reinterpret_cast<UInt8 *>(&uuid) + 16));
|
||||
istr.ignore(33);
|
||||
fast = true;
|
||||
}
|
||||
else if ((len == 36) && (istr.position()[36] == '\''))
|
||||
{
|
||||
parseUUID(
|
||||
reinterpret_cast<const UInt8 *>(istr.position()), std::reverse_iterator<UInt8 *>(reinterpret_cast<UInt8 *>(&uuid) + 16));
|
||||
istr.ignore(37);
|
||||
uuid = parseUUID(std::span(reinterpret_cast<const UInt8 *>(istr.position()), len));
|
||||
istr.ignore(len + 1);
|
||||
fast = true;
|
||||
}
|
||||
else
|
||||
|
@ -207,6 +207,51 @@ public:
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Analogous to getColumn, but for dictGetAll
|
||||
*/
|
||||
virtual ColumnPtr getColumnAllValues(
|
||||
const std::string & attribute_name [[maybe_unused]],
|
||||
const DataTypePtr & result_type [[maybe_unused]],
|
||||
const Columns & key_columns [[maybe_unused]],
|
||||
const DataTypes & key_types [[maybe_unused]],
|
||||
const ColumnPtr & default_values_column [[maybe_unused]],
|
||||
size_t limit [[maybe_unused]]) const
|
||||
{
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED,
|
||||
"Method getColumnAllValues is not supported for {} dictionary.",
|
||||
getDictionaryID().getNameForLogs());
|
||||
}
|
||||
|
||||
/**
|
||||
* Analogous to getColumns, but for dictGetAll
|
||||
*/
|
||||
virtual Columns getColumnsAllValues(
|
||||
const Strings & attribute_names,
|
||||
const DataTypes & result_types,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const Columns & default_values_columns,
|
||||
size_t limit) const
|
||||
{
|
||||
size_t attribute_names_size = attribute_names.size();
|
||||
|
||||
Columns result;
|
||||
result.reserve(attribute_names_size);
|
||||
|
||||
for (size_t i = 0; i < attribute_names_size; ++i)
|
||||
{
|
||||
const auto & attribute_name = attribute_names[i];
|
||||
const auto & result_type = result_types[i];
|
||||
const auto & default_values_column = default_values_columns[i];
|
||||
|
||||
result.emplace_back(getColumnAllValues(
|
||||
attribute_name, result_type, key_columns, key_types, default_values_column, limit));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/** Subclass must validate key columns and key types and return ColumnUInt8 that
|
||||
* is bitmask representation of is key in dictionary or not.
|
||||
* If key is in dictionary then value of associated row will be 1, otherwise 0.
|
||||
|
@ -70,7 +70,7 @@ namespace
|
||||
explicit StringPiece(int ref_) : ref_num(ref_) {}
|
||||
};
|
||||
|
||||
Field parseStringToField(const String & raw, DataTypePtr data_type)
|
||||
Field parseStringToField(const String & raw, const DataTypePtr data_type)
|
||||
try
|
||||
{
|
||||
ReadBufferFromString buffer(raw);
|
||||
@ -419,6 +419,65 @@ RegExpTreeDictionary::RegExpTreeDictionary(
|
||||
calculateBytesAllocated();
|
||||
}
|
||||
|
||||
// Thin wrapper around unordered_map<String, Field> that manages the collection of attribute values subject to the
|
||||
// behavior specified by collect_values_limit
|
||||
class RegExpTreeDictionary::AttributeCollector : public std::unordered_map<String, Field>
|
||||
{
|
||||
private:
|
||||
std::optional<size_t> collect_values_limit; // std::nullopt means single-value mode, i.e. don't collect
|
||||
size_t n_full_attributes;
|
||||
|
||||
public:
|
||||
explicit AttributeCollector(std::optional<size_t> collect_values_limit_)
|
||||
: collect_values_limit(collect_values_limit_), n_full_attributes(0)
|
||||
{
|
||||
}
|
||||
|
||||
constexpr bool collecting() const { return collect_values_limit != std::nullopt; }
|
||||
|
||||
// Add a name-value pair to the collection if there's space
|
||||
void add(const String & attr_name, Field field)
|
||||
{
|
||||
if (collect_values_limit)
|
||||
{
|
||||
if (!this->contains(attr_name))
|
||||
(*this)[attr_name] = Array();
|
||||
|
||||
Array & values = (*this)[attr_name].safeGet<Array &>();
|
||||
if (values.size() < *collect_values_limit)
|
||||
{
|
||||
values.push_back(std::move(field));
|
||||
if (values.size() == *collect_values_limit)
|
||||
n_full_attributes++;
|
||||
}
|
||||
}
|
||||
else if (!this->contains(attr_name))
|
||||
{
|
||||
(*this)[attr_name] = std::move(field);
|
||||
n_full_attributes++;
|
||||
}
|
||||
}
|
||||
|
||||
// Checks if no more values can be added for a given attribute
|
||||
inline bool full(const String & attr_name) const
|
||||
{
|
||||
if (collect_values_limit)
|
||||
{
|
||||
auto it = this->find(attr_name);
|
||||
if (it == this->end())
|
||||
return false;
|
||||
return it->second.safeGet<const Array &>().size() >= *collect_values_limit;
|
||||
}
|
||||
else
|
||||
{
|
||||
return this->contains(attr_name);
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the number of full attributes
|
||||
inline size_t attributesFull() const { return n_full_attributes; }
|
||||
};
|
||||
|
||||
std::pair<String, bool> processBackRefs(const String & data, const re2_st::RE2 & searcher, const std::vector<StringPiece> & pieces)
|
||||
{
|
||||
re2_st::StringPiece haystack(data.data(), data.size());
|
||||
@ -442,7 +501,7 @@ std::pair<String, bool> processBackRefs(const String & data, const re2_st::RE2 &
|
||||
// The return value means whether we finish collecting.
|
||||
bool RegExpTreeDictionary::setAttributes(
|
||||
UInt64 id,
|
||||
std::unordered_map<String, Field> & attributes_to_set,
|
||||
AttributeCollector & attributes_to_set,
|
||||
const String & data,
|
||||
std::unordered_set<UInt64> & visited_nodes,
|
||||
const std::unordered_map<String, const DictionaryAttribute &> & attributes,
|
||||
@ -451,34 +510,43 @@ bool RegExpTreeDictionary::setAttributes(
|
||||
{
|
||||
|
||||
if (visited_nodes.contains(id))
|
||||
return attributes_to_set.size() == attributes.size();
|
||||
return attributes_to_set.attributesFull() == attributes.size();
|
||||
visited_nodes.emplace(id);
|
||||
const auto & node_attributes = regex_nodes.at(id)->attributes;
|
||||
for (const auto & [name_, value] : node_attributes)
|
||||
{
|
||||
if (!attributes.contains(name_) || attributes_to_set.contains(name_))
|
||||
if (!attributes.contains(name_) || attributes_to_set.full(name_))
|
||||
continue;
|
||||
|
||||
if (value.containsBackRefs())
|
||||
{
|
||||
auto [updated_str, use_default] = processBackRefs(data, regex_nodes.at(id)->searcher, value.pieces);
|
||||
if (use_default)
|
||||
{
|
||||
DefaultValueProvider default_value(attributes.at(name_).null_value, defaults.at(name_));
|
||||
attributes_to_set[name_] = default_value.getDefaultValue(key_index);
|
||||
// Back-ref processing failed.
|
||||
// - If not collecting values, set the default value immediately while we're still on this node.
|
||||
// Otherwise, a value from a different node could take its place before we set it to the default value post-walk.
|
||||
// - If collecting values, don't add anything. If we find no other matches for this attribute,
|
||||
// then we'll set its value to the default Array value later.
|
||||
if (!attributes_to_set.collecting())
|
||||
{
|
||||
DefaultValueProvider default_value(attributes.at(name_).null_value, defaults.at(name_));
|
||||
attributes_to_set.add(name_, default_value.getDefaultValue(key_index));
|
||||
}
|
||||
}
|
||||
else
|
||||
attributes_to_set[name_] = parseStringToField(updated_str, attributes.at(name_).type);
|
||||
attributes_to_set.add(name_, parseStringToField(updated_str, attributes.at(name_).type));
|
||||
}
|
||||
else
|
||||
attributes_to_set[name_] = value.field;
|
||||
attributes_to_set.add(name_, value.field);
|
||||
}
|
||||
|
||||
auto parent_id = regex_nodes.at(id)->parent_id;
|
||||
if (parent_id > 0)
|
||||
setAttributes(parent_id, attributes_to_set, data, visited_nodes, attributes, defaults, key_index);
|
||||
|
||||
/// if all the attributes have set, the walking through can be stopped.
|
||||
return attributes_to_set.size() == attributes.size();
|
||||
/// if all attributes are full, we can stop walking the tree
|
||||
return attributes_to_set.attributesFull() == attributes.size();
|
||||
}
|
||||
|
||||
/// a temp struct to store all the matched result.
|
||||
@ -550,7 +618,8 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
|
||||
const ColumnString::Chars & keys_data,
|
||||
const ColumnString::Offsets & keys_offsets,
|
||||
const std::unordered_map<String, const DictionaryAttribute &> & attributes,
|
||||
const std::unordered_map<String, ColumnPtr> & defaults) const
|
||||
const std::unordered_map<String, ColumnPtr> & defaults,
|
||||
std::optional<size_t> collect_values_limit) const
|
||||
{
|
||||
|
||||
#if USE_VECTORSCAN
|
||||
@ -573,7 +642,7 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
|
||||
/// initialize columns
|
||||
for (const auto & [name_, attr] : attributes)
|
||||
{
|
||||
auto col_ptr = attr.type->createColumn();
|
||||
auto col_ptr = (collect_values_limit ? std::make_shared<DataTypeArray>(attr.type) : attr.type)->createColumn();
|
||||
col_ptr->reserve(keys_offsets.size());
|
||||
columns[name_] = std::move(col_ptr);
|
||||
}
|
||||
@ -630,11 +699,11 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
|
||||
|
||||
match_result.sort();
|
||||
/// Walk through the regex tree util all attributes are set;
|
||||
std::unordered_map<String, Field> attributes_to_set;
|
||||
AttributeCollector attributes_to_set{collect_values_limit};
|
||||
std::unordered_set<UInt64> visited_nodes;
|
||||
|
||||
/// Some node matches but its parents cannot match. In this case we must regard this node unmatched.
|
||||
auto is_invalid = [&](UInt64 id)
|
||||
auto is_valid = [&](UInt64 id)
|
||||
{
|
||||
while (id)
|
||||
{
|
||||
@ -650,7 +719,7 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
|
||||
for (auto item : match_result.matched_idx_sorted_list)
|
||||
{
|
||||
UInt64 id = item.second;
|
||||
if (!is_invalid(id))
|
||||
if (!is_valid(id))
|
||||
continue;
|
||||
if (visited_nodes.contains(id))
|
||||
continue;
|
||||
@ -663,7 +732,8 @@ std::unordered_map<String, ColumnPtr> RegExpTreeDictionary::match(
|
||||
if (attributes_to_set.contains(name_))
|
||||
continue;
|
||||
|
||||
DefaultValueProvider default_value(attr.null_value, defaults.at(name_));
|
||||
DefaultValueProvider default_value(
|
||||
collect_values_limit ? DataTypeArray(attr.type).getDefault() : attr.null_value, defaults.at(name_));
|
||||
columns[name_]->insert(default_value.getDefaultValue(key_idx));
|
||||
}
|
||||
|
||||
@ -727,12 +797,13 @@ Pipe RegExpTreeDictionary::read(const Names & , size_t max_block_size, size_t) c
|
||||
return Pipe(std::make_shared<BlocksListSource>(std::move(result)));
|
||||
}
|
||||
|
||||
Columns RegExpTreeDictionary::getColumns(
|
||||
Columns RegExpTreeDictionary::getColumnsImpl(
|
||||
const Strings & attribute_names,
|
||||
const DataTypes & result_types,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const Columns & default_values_columns) const
|
||||
const Columns & default_values_columns,
|
||||
std::optional<size_t> collect_values_limit) const
|
||||
{
|
||||
/// valid check
|
||||
if (key_columns.size() != 1)
|
||||
@ -746,7 +817,17 @@ Columns RegExpTreeDictionary::getColumns(
|
||||
|
||||
for (size_t i = 0; i < attribute_names.size(); i++)
|
||||
{
|
||||
const auto & attribute = structure.getAttribute(attribute_names[i], result_types[i]);
|
||||
DataTypePtr attribute_type = result_types[i];
|
||||
if (collect_values_limit)
|
||||
{
|
||||
if (!WhichDataType(attribute_type).isArray())
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR, "Expected Array result type for attribute `{}`, got `{}`",
|
||||
attribute_names[i],
|
||||
attribute_type->getName());
|
||||
attribute_type = assert_cast<const DataTypeArray &>(*attribute_type).getNestedType();
|
||||
}
|
||||
const auto & attribute = structure.getAttribute(attribute_names[i], attribute_type);
|
||||
attributes.emplace(attribute.name, attribute);
|
||||
defaults[attribute.name] = default_values_columns[i];
|
||||
}
|
||||
@ -757,7 +838,8 @@ Columns RegExpTreeDictionary::getColumns(
|
||||
key_column->getChars(),
|
||||
key_column->getOffsets(),
|
||||
attributes,
|
||||
defaults);
|
||||
defaults,
|
||||
collect_values_limit);
|
||||
|
||||
Columns result;
|
||||
for (const String & name_ : attribute_names)
|
||||
|
@ -101,16 +101,50 @@ public:
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const ColumnPtr & default_values_column) const override
|
||||
{
|
||||
return getColumns(Strings({attribute_name}), DataTypes({result_type}), key_columns, key_types, Columns({default_values_column}))[0];
|
||||
}
|
||||
{
|
||||
return getColumns(Strings({attribute_name}), DataTypes({result_type}), key_columns, key_types, Columns({default_values_column}))[0];
|
||||
}
|
||||
|
||||
Columns getColumns(
|
||||
const Strings & attribute_names,
|
||||
const DataTypes & result_types,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const Columns & default_values_columns) const override;
|
||||
const Columns & default_values_columns) const override
|
||||
{
|
||||
return getColumnsImpl(attribute_names, result_types, key_columns, key_types, default_values_columns, std::nullopt);
|
||||
}
|
||||
|
||||
ColumnPtr getColumnAllValues(
|
||||
const std::string & attribute_name,
|
||||
const DataTypePtr & result_type,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const ColumnPtr & default_values_column,
|
||||
size_t limit) const override
|
||||
{
|
||||
return getColumnsAllValues(
|
||||
Strings({attribute_name}), DataTypes({result_type}), key_columns, key_types, Columns({default_values_column}), limit)[0];
|
||||
}
|
||||
|
||||
Columns getColumnsAllValues(
|
||||
const Strings & attribute_names,
|
||||
const DataTypes & result_types,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const Columns & default_values_columns,
|
||||
size_t limit) const override
|
||||
{
|
||||
return getColumnsImpl(attribute_names, result_types, key_columns, key_types, default_values_columns, limit);
|
||||
}
|
||||
|
||||
Columns getColumnsImpl(
|
||||
const Strings & attribute_names,
|
||||
const DataTypes & result_types,
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const Columns & default_values_columns,
|
||||
std::optional<size_t> collect_values_limit) const;
|
||||
|
||||
private:
|
||||
const DictionaryStructure structure;
|
||||
@ -137,11 +171,14 @@ private:
|
||||
const ColumnString::Chars & keys_data,
|
||||
const ColumnString::Offsets & keys_offsets,
|
||||
const std::unordered_map<String, const DictionaryAttribute &> & attributes,
|
||||
const std::unordered_map<String, ColumnPtr> & defaults) const;
|
||||
const std::unordered_map<String, ColumnPtr> & defaults,
|
||||
std::optional<size_t> collect_values_limit) const;
|
||||
|
||||
class AttributeCollector;
|
||||
|
||||
bool setAttributes(
|
||||
UInt64 id,
|
||||
std::unordered_map<String, Field> & attributes_to_set,
|
||||
AttributeCollector & attributes_to_set,
|
||||
const String & data,
|
||||
std::unordered_set<UInt64> & visited_nodes,
|
||||
const std::unordered_map<String, const DictionaryAttribute &> & attributes,
|
||||
|
@ -1219,7 +1219,7 @@ off_t CachedOnDiskReadBufferFromFile::getPosition()
|
||||
|
||||
void CachedOnDiskReadBufferFromFile::assertCorrectness() const
|
||||
{
|
||||
if (!CachedObjectStorage::canUseReadThroughCache()
|
||||
if (!CachedObjectStorage::canUseReadThroughCache(settings)
|
||||
&& !settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache usage is not allowed (query_id: {})", query_id);
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
|
||||
|
||||
with_cache = settings.remote_fs_cache
|
||||
&& settings.enable_filesystem_cache
|
||||
&& (!query_id.empty() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache);
|
||||
&& (!query_id.empty() || settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache || !settings.avoid_readthrough_cache_outside_query_context);
|
||||
}
|
||||
|
||||
SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const StoredObject & object)
|
||||
|
@ -11,9 +11,16 @@
|
||||
#include <Disks/IO/AsynchronousBoundedReadBuffer.h>
|
||||
|
||||
#include <Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageAuth.h>
|
||||
#include <Disks/ObjectStorages/ObjectStorageIteratorAsync.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Common/logger_useful.h>
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric ObjectStorageAzureThreads;
|
||||
extern const Metric ObjectStorageAzureThreadsActive;
|
||||
|
||||
}
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -26,6 +33,60 @@ namespace ErrorCodes
|
||||
}
|
||||
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
class AzureIteratorAsync final : public IObjectStorageIteratorAsync
|
||||
{
|
||||
public:
|
||||
AzureIteratorAsync(
|
||||
const std::string & path_prefix,
|
||||
std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client_,
|
||||
size_t max_list_size)
|
||||
: IObjectStorageIteratorAsync(
|
||||
CurrentMetrics::ObjectStorageAzureThreads,
|
||||
CurrentMetrics::ObjectStorageAzureThreadsActive,
|
||||
"ListObjectAzure")
|
||||
, client(client_)
|
||||
{
|
||||
|
||||
options.Prefix = path_prefix;
|
||||
options.PageSizeHint = static_cast<int>(max_list_size);
|
||||
}
|
||||
|
||||
private:
|
||||
bool getBatchAndCheckNext(RelativePathsWithMetadata & batch) override
|
||||
{
|
||||
auto outcome = client->ListBlobs(options);
|
||||
auto blob_list_response = client->ListBlobs(options);
|
||||
auto blobs_list = blob_list_response.Blobs;
|
||||
|
||||
for (const auto & blob : blobs_list)
|
||||
{
|
||||
batch.emplace_back(
|
||||
blob.Name,
|
||||
ObjectMetadata{
|
||||
static_cast<uint64_t>(blob.BlobSize),
|
||||
Poco::Timestamp::fromEpochTime(
|
||||
std::chrono::duration_cast<std::chrono::seconds>(
|
||||
blob.Details.LastModified.time_since_epoch()).count()),
|
||||
{}});
|
||||
}
|
||||
|
||||
options.ContinuationToken = blob_list_response.NextPageToken;
|
||||
if (blob_list_response.HasPage())
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client;
|
||||
Azure::Storage::Blobs::ListBlobsOptions options;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
||||
AzureObjectStorage::AzureObjectStorage(
|
||||
const String & name_,
|
||||
AzureClientPtr && client_,
|
||||
@ -67,6 +128,14 @@ bool AzureObjectStorage::exists(const StoredObject & object) const
|
||||
return false;
|
||||
}
|
||||
|
||||
ObjectStorageIteratorPtr AzureObjectStorage::iterate(const std::string & path_prefix) const
|
||||
{
|
||||
auto settings_ptr = settings.get();
|
||||
auto client_ptr = client.get();
|
||||
|
||||
return std::make_shared<AzureIteratorAsync>(path_prefix, client_ptr, settings_ptr->list_object_keys_size);
|
||||
}
|
||||
|
||||
void AzureObjectStorage::listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const
|
||||
{
|
||||
auto client_ptr = client.get();
|
||||
|
@ -60,6 +60,8 @@ public:
|
||||
|
||||
void listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const override;
|
||||
|
||||
ObjectStorageIteratorPtr iterate(const std::string & path_prefix) const override;
|
||||
|
||||
DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
|
||||
|
||||
std::string getName() const override { return "AzureObjectStorage"; }
|
||||
|
@ -57,7 +57,7 @@ ReadSettings CachedObjectStorage::patchSettings(const ReadSettings & read_settin
|
||||
ReadSettings modified_settings{read_settings};
|
||||
modified_settings.remote_fs_cache = cache;
|
||||
|
||||
if (!canUseReadThroughCache())
|
||||
if (!canUseReadThroughCache(read_settings))
|
||||
modified_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true;
|
||||
|
||||
return object_storage->patchSettings(modified_settings);
|
||||
@ -227,8 +227,11 @@ String CachedObjectStorage::getObjectsNamespace() const
|
||||
return object_storage->getObjectsNamespace();
|
||||
}
|
||||
|
||||
bool CachedObjectStorage::canUseReadThroughCache()
|
||||
bool CachedObjectStorage::canUseReadThroughCache(const ReadSettings & settings)
|
||||
{
|
||||
if (!settings.avoid_readthrough_cache_outside_query_context)
|
||||
return true;
|
||||
|
||||
return CurrentThread::isInitialized()
|
||||
&& CurrentThread::get().getQueryContext()
|
||||
&& !CurrentThread::getQueryId().empty();
|
||||
|
@ -112,7 +112,9 @@ public:
|
||||
|
||||
WriteSettings getAdjustedSettingsFromMetadataFile(const WriteSettings & settings, const std::string & path) const override;
|
||||
|
||||
static bool canUseReadThroughCache();
|
||||
const FileCacheSettings & getCacheSettings() const { return cache_settings; }
|
||||
|
||||
static bool canUseReadThroughCache(const ReadSettings & settings);
|
||||
|
||||
private:
|
||||
FileCache::Key getCacheKey(const std::string & path) const;
|
||||
|
@ -596,7 +596,8 @@ void DiskObjectStorage::writeFileUsingBlobWritingFunction(const String & path, W
|
||||
{
|
||||
LOG_TEST(log, "Write file: {}", path);
|
||||
auto transaction = createObjectStorageTransaction();
|
||||
return transaction->writeFileUsingBlobWritingFunction(path, mode, std::move(write_blob_function));
|
||||
transaction->writeFileUsingBlobWritingFunction(path, mode, std::move(write_blob_function));
|
||||
transaction->commit();
|
||||
}
|
||||
|
||||
void DiskObjectStorage::applyNewSettings(
|
||||
|
@ -710,8 +710,6 @@ void DiskObjectStorageTransaction::writeFileUsingBlobWritingFunction(
|
||||
metadata_transaction->createMetadataFile(path, blob_name, object_size);
|
||||
else
|
||||
metadata_transaction->addBlobToMetadata(path, blob_name, object_size);
|
||||
|
||||
metadata_transaction->commit();
|
||||
}
|
||||
|
||||
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <IO/copyData.h>
|
||||
#include <IO/ReadBufferFromFileBase.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Disks/ObjectStorages/ObjectStorageIterator.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -29,6 +30,14 @@ void IObjectStorage::listObjects(const std::string &, RelativePathsWithMetadata
|
||||
}
|
||||
|
||||
|
||||
ObjectStorageIteratorPtr IObjectStorage::iterate(const std::string & path_prefix) const
|
||||
{
|
||||
RelativePathsWithMetadata files;
|
||||
listObjects(path_prefix, files, 0);
|
||||
|
||||
return std::make_shared<ObjectStorageIteratorFromList>(std::move(files));
|
||||
}
|
||||
|
||||
std::optional<ObjectMetadata> IObjectStorage::tryGetObjectMetadata(const std::string & path) const
|
||||
{
|
||||
try
|
||||
|
@ -20,6 +20,9 @@
|
||||
#include <Disks/WriteMode.h>
|
||||
#include <Interpreters/Context_fwd.h>
|
||||
#include <Core/Types.h>
|
||||
#include <Disks/DirectoryIterator.h>
|
||||
#include <Common/ThreadPool.h>
|
||||
#include <Interpreters/threadPoolCallbackRunner.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -51,6 +54,8 @@ struct RelativePathWithMetadata
|
||||
|
||||
using RelativePathsWithMetadata = std::vector<RelativePathWithMetadata>;
|
||||
|
||||
class IObjectStorageIterator;
|
||||
using ObjectStorageIteratorPtr = std::shared_ptr<IObjectStorageIterator>;
|
||||
|
||||
/// Base class for all object storages which implement some subset of ordinary filesystem operations.
|
||||
///
|
||||
@ -75,6 +80,8 @@ public:
|
||||
|
||||
virtual void listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const;
|
||||
|
||||
virtual ObjectStorageIteratorPtr iterate(const std::string & path_prefix) const;
|
||||
|
||||
/// Get object metadata if supported. It should be possible to receive
|
||||
/// at least size of object
|
||||
virtual std::optional<ObjectMetadata> tryGetObjectMetadata(const std::string & path) const;
|
||||
|
20 src/Disks/ObjectStorages/ObjectStorageIterator.cpp (Normal file)
@ -0,0 +1,20 @@
|
||||
#include <Disks/ObjectStorages/ObjectStorageIterator.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
RelativePathWithMetadata ObjectStorageIteratorFromList::current() const
|
||||
{
|
||||
if (!isValid())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to access invalid iterator");
|
||||
|
||||
return *batch_iterator;
|
||||
}
|
||||
|
||||
}
|
53 src/Disks/ObjectStorages/ObjectStorageIterator.h (Normal file)
@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
|
||||
#include <Disks/ObjectStorages/IObjectStorage.h>
|
||||
#include <memory>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class IObjectStorageIterator
|
||||
{
|
||||
public:
|
||||
virtual void next() = 0;
|
||||
virtual bool isValid() const = 0;
|
||||
virtual RelativePathWithMetadata current() const = 0;
|
||||
virtual size_t getAccumulatedSize() const = 0;
|
||||
|
||||
virtual ~IObjectStorageIterator() = default;
|
||||
};
|
||||
|
||||
using ObjectStorageIteratorPtr = std::shared_ptr<IObjectStorageIterator>;
|
||||
|
||||
class ObjectStorageIteratorFromList : public IObjectStorageIterator
|
||||
{
|
||||
public:
|
||||
explicit ObjectStorageIteratorFromList(RelativePathsWithMetadata && batch_)
|
||||
: batch(std::move(batch_))
|
||||
, batch_iterator(batch.begin())
|
||||
{
|
||||
}
|
||||
|
||||
void next() override
|
||||
{
|
||||
if (isValid())
|
||||
++batch_iterator;
|
||||
}
|
||||
|
||||
bool isValid() const override
|
||||
{
|
||||
return batch_iterator != batch.end();
|
||||
}
|
||||
|
||||
RelativePathWithMetadata current() const override;
|
||||
|
||||
size_t getAccumulatedSize() const override
|
||||
{
|
||||
return batch.size();
|
||||
}
|
||||
private:
|
||||
RelativePathsWithMetadata batch;
|
||||
RelativePathsWithMetadata::iterator batch_iterator;
|
||||
};
|
||||
|
||||
}
|
64 src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp (Normal file)
@ -0,0 +1,64 @@
|
||||
#include <Disks/ObjectStorages/ObjectStorageIteratorAsync.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
void IObjectStorageIteratorAsync::next()
|
||||
{
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
if (current_batch_iterator != current_batch.end())
|
||||
{
|
||||
++current_batch_iterator;
|
||||
}
|
||||
else if (!is_finished)
|
||||
{
|
||||
if (outcome_future.valid())
|
||||
{
|
||||
BatchAndHasNext next_batch = outcome_future.get();
|
||||
current_batch = std::move(next_batch.batch);
|
||||
accumulated_size.fetch_add(current_batch.size(), std::memory_order_relaxed);
|
||||
current_batch_iterator = current_batch.begin();
|
||||
if (next_batch.has_next)
|
||||
outcome_future = scheduleBatch();
|
||||
else
|
||||
is_finished = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::future<IObjectStorageIteratorAsync::BatchAndHasNext> IObjectStorageIteratorAsync::scheduleBatch()
|
||||
{
|
||||
return list_objects_scheduler([this]
|
||||
{
|
||||
BatchAndHasNext result;
|
||||
result.has_next = getBatchAndCheckNext(result.batch);
|
||||
return result;
|
||||
}, Priority{});
|
||||
}
|
||||
|
||||
|
||||
bool IObjectStorageIteratorAsync::isValid() const
|
||||
{
|
||||
return current_batch_iterator != current_batch.end();
|
||||
}
|
||||
|
||||
RelativePathWithMetadata IObjectStorageIteratorAsync::current() const
|
||||
{
|
||||
if (!isValid())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to access invalid iterator");
|
||||
|
||||
return *current_batch_iterator;
|
||||
}
|
||||
|
||||
size_t IObjectStorageIteratorAsync::getAccumulatedSize() const
|
||||
{
|
||||
return accumulated_size.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
}
|
58
src/Disks/ObjectStorages/ObjectStorageIteratorAsync.h
Normal file
@ -0,0 +1,58 @@
#pragma once

#include <Disks/ObjectStorages/ObjectStorageIterator.h>
#include <Common/ThreadPool.h>
#include <Interpreters/threadPoolCallbackRunner.h>
#include <mutex>
#include <Common/CurrentMetrics.h>

namespace DB
{

class IObjectStorageIteratorAsync : public IObjectStorageIterator
{
public:
    IObjectStorageIteratorAsync(
        CurrentMetrics::Metric threads_metric,
        CurrentMetrics::Metric threads_active_metric,
        const std::string & thread_name)
        : list_objects_pool(threads_metric, threads_active_metric, 1)
        , list_objects_scheduler(threadPoolCallbackRunner<BatchAndHasNext>(list_objects_pool, thread_name))
    {
    }

    void next() override;
    bool isValid() const override;
    RelativePathWithMetadata current() const override;
    size_t getAccumulatedSize() const override;

    ~IObjectStorageIteratorAsync() override
    {
        list_objects_pool.wait();
    }

protected:

    virtual bool getBatchAndCheckNext(RelativePathsWithMetadata & batch) = 0;

    struct BatchAndHasNext
    {
        RelativePathsWithMetadata batch;
        bool has_next;
    };

    std::future<BatchAndHasNext> scheduleBatch();

    bool is_finished{false};

    std::mutex mutex;
    ThreadPool list_objects_pool;
    ThreadPoolCallbackRunner<BatchAndHasNext> list_objects_scheduler;
    std::future<BatchAndHasNext> outcome_future;
    RelativePathsWithMetadata current_batch;
    RelativePathsWithMetadata::iterator current_batch_iterator;
    std::atomic<size_t> accumulated_size = 0;
};


}
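The idea behind the asynchronous base class is that a subclass only supplies getBatchAndCheckNext(), while the base class overlaps listing of the next batch with consumption of the current one. A minimal standalone sketch of that prefetch pattern follows; std::async stands in for the ThreadPool callback runner, and all names here (AsyncBatchIterator, PagedListing) are hypothetical, not ClickHouse classes.

#include <cstddef>
#include <future>
#include <iostream>
#include <string>
#include <vector>

class AsyncBatchIterator
{
public:
    virtual ~AsyncBatchIterator() = default;

    void start() { future = scheduleBatch(); advance(); }
    bool isValid() const { return pos < batch.size(); }
    const std::string & current() const { return batch[pos]; }

    void next()
    {
        if (++pos < batch.size())
            return;
        if (!finished)
            advance();          // current batch exhausted: wait for the prefetched one
    }

protected:
    // Fill `out` with the next batch; return true if more batches may follow.
    virtual bool getBatchAndCheckNext(std::vector<std::string> & out) = 0;

private:
    struct BatchAndHasNext { std::vector<std::string> batch; bool has_next; };

    std::future<BatchAndHasNext> scheduleBatch()
    {
        return std::async(std::launch::async, [this]
        {
            BatchAndHasNext result;
            result.has_next = getBatchAndCheckNext(result.batch);
            return result;
        });
    }

    void advance()
    {
        BatchAndHasNext next = future.get();
        batch = std::move(next.batch);
        pos = 0;
        if (next.has_next)
            future = scheduleBatch();   // prefetch the following batch while this one is consumed
        else
            finished = true;
        // Note: the real class waits on its thread pool in the destructor; here the last
        // future is always consumed before the object is destroyed.
    }

    std::vector<std::string> batch;
    size_t pos = 0;
    bool finished = false;
    std::future<BatchAndHasNext> future;
};

// Toy "object storage" that serves three pages of keys.
class PagedListing : public AsyncBatchIterator
{
    bool getBatchAndCheckNext(std::vector<std::string> & out) override
    {
        out = {"prefix/key_" + std::to_string(page * 2), "prefix/key_" + std::to_string(page * 2 + 1)};
        return ++page < 3;
    }
    int page = 0;
};

int main()
{
    PagedListing listing;
    listing.start();
    for (; listing.isValid(); listing.next())
        std::cout << listing.current() << '\n';
}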
@ -3,6 +3,7 @@
|
||||
#if USE_AWS_S3
|
||||
|
||||
#include <IO/S3Common.h>
|
||||
#include <Disks/ObjectStorages/ObjectStorageIteratorAsync.h>
|
||||
|
||||
#include <Disks/IO/ReadBufferFromRemoteFSGather.h>
|
||||
#include <Disks/ObjectStorages/DiskObjectStorageCommon.h>
|
||||
@ -33,6 +34,13 @@ namespace ProfileEvents
|
||||
extern const Event DiskS3ListObjects;
|
||||
}
|
||||
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric ObjectStorageS3Threads;
|
||||
extern const Metric ObjectStorageS3ThreadsActive;
|
||||
}
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
@ -84,6 +92,62 @@ void logIfError(const Aws::Utils::Outcome<Result, Error> & response, std::functi
|
||||
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
class S3IteratorAsync final : public IObjectStorageIteratorAsync
|
||||
{
|
||||
public:
|
||||
S3IteratorAsync(
|
||||
const std::string & bucket,
|
||||
const std::string & path_prefix,
|
||||
std::shared_ptr<const S3::Client> client_,
|
||||
size_t max_list_size)
|
||||
: IObjectStorageIteratorAsync(
|
||||
CurrentMetrics::ObjectStorageS3Threads,
|
||||
CurrentMetrics::ObjectStorageS3ThreadsActive,
|
||||
"ListObjectS3")
|
||||
, client(client_)
|
||||
{
|
||||
request.SetBucket(bucket);
|
||||
request.SetPrefix(path_prefix);
|
||||
request.SetMaxKeys(static_cast<int>(max_list_size));
|
||||
}
|
||||
|
||||
private:
|
||||
bool getBatchAndCheckNext(RelativePathsWithMetadata & batch) override
|
||||
{
|
||||
ProfileEvents::increment(ProfileEvents::S3ListObjects);
|
||||
|
||||
bool result = false;
|
||||
auto outcome = client->ListObjectsV2(request);
|
||||
/// Outcome failure will be handled on the caller side.
|
||||
if (outcome.IsSuccess())
|
||||
{
|
||||
auto objects = outcome.GetResult().GetContents();
|
||||
|
||||
result = !objects.empty();
|
||||
|
||||
for (const auto & object : objects)
|
||||
batch.emplace_back(object.GetKey(), ObjectMetadata{static_cast<uint64_t>(object.GetSize()), Poco::Timestamp::fromEpochTime(object.GetLastModified().Seconds()), {}});
|
||||
|
||||
if (result)
|
||||
request.SetContinuationToken(outcome.GetResult().GetNextContinuationToken());
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
throw Exception(ErrorCodes::S3_ERROR, "Could not list objects in bucket {} with prefix {}, S3 exception: {}, message: {}",
|
||||
quoteString(request.GetBucket()), quoteString(request.GetPrefix()),
|
||||
backQuote(outcome.GetError().GetExceptionName()), quoteString(outcome.GetError().GetMessage()));
|
||||
}
|
||||
|
||||
std::shared_ptr<const S3::Client> client;
|
||||
S3::ListObjectsV2Request request;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
bool S3ObjectStorage::exists(const StoredObject & object) const
|
||||
{
|
||||
auto settings_ptr = s3_settings.get();
|
||||
@ -183,6 +247,15 @@ std::unique_ptr<WriteBufferFromFileBase> S3ObjectStorage::writeObject( /// NOLIN
|
||||
disk_write_settings);
|
||||
}
|
||||
|
||||
|
||||
ObjectStorageIteratorPtr S3ObjectStorage::iterate(const std::string & path_prefix) const
|
||||
{
|
||||
auto settings_ptr = s3_settings.get();
|
||||
auto client_ptr = client.get();
|
||||
|
||||
return std::make_shared<S3IteratorAsync>(bucket, path_prefix, client_ptr, settings_ptr->list_object_keys_size);
|
||||
}
|
||||
|
||||
void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const
|
||||
{
|
||||
auto settings_ptr = s3_settings.get();
|
||||
|
@ -102,6 +102,8 @@ public:
|
||||
|
||||
void listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const override;
|
||||
|
||||
ObjectStorageIteratorPtr iterate(const std::string & path_prefix) const override;
|
||||
|
||||
/// Uses `DeleteObjectRequest`.
|
||||
void removeObject(const StoredObject & object) override;
|
||||
|
||||
|
@ -364,7 +364,7 @@ std::unique_ptr<ReadBuffer> FormatFactory::wrapReadBufferIfNeeded(
|
||||
settings.max_download_buffer_size);
|
||||
|
||||
res = wrapInParallelReadBufferIfSupported(
|
||||
buf, threadPoolCallbackRunner<void>(IOThreadPool::get(), "ParallelRead"),
|
||||
buf, threadPoolCallbackRunner<void>(getIOThreadPool().get(), "ParallelRead"),
|
||||
max_download_threads, settings.max_download_buffer_size, file_size);
|
||||
}
|
||||
|
||||
|
@ -45,9 +45,9 @@ namespace JSONUtils
|
||||
const auto current_object_size = memory.size() + static_cast<size_t>(pos - in.position());
|
||||
if (min_bytes != 0 && current_object_size > 10 * min_bytes)
|
||||
throw ParsingException(ErrorCodes::INCORRECT_DATA,
|
||||
"Size of JSON object is extremely large. Expected not greater than {} bytes, but current is {} bytes per row. "
|
||||
"Size of JSON object at position {} is extremely large. Expected not greater than {} bytes, but current is {} bytes per row. "
|
||||
"Increase the value setting 'min_chunk_bytes_for_parallel_parsing' or check your data manually, "
|
||||
"most likely JSON is malformed", min_bytes, current_object_size);
|
||||
"most likely JSON is malformed", in.count(), min_bytes, current_object_size);
|
||||
|
||||
if (quotes)
|
||||
{
|
||||
|
@ -45,11 +45,26 @@ Accepts 3 parameters:
|
||||
Returned value: value of the dictionary attribute parsed in the attribute’s data type if key is found, otherwise NULL.
|
||||
|
||||
Throws an exception if cannot parse the value of the attribute or the value does not match the attribute data type.
|
||||
)" };
|
||||
|
||||
constexpr auto dict_get_all_description { R"(
|
||||
Retrieves all values from a dictionary corresponding to the given key values.
|
||||
|
||||
Accepts 3 or 4 parameters:
|
||||
-- name of the dictionary;
|
||||
-- name of the column of the dictionary or tuple of column names;
|
||||
-- key value - expression returning dictionary key-type value or tuple-type value - depending on the dictionary configuration;
|
||||
-- [optional] maximum number of values to return for each attribute;
|
||||
|
||||
Returned value: array of dictionary attribute values parsed in the attribute's data type if key is found, otherwise empty array.
|
||||
|
||||
Throws an exception if cannot parse the value of the attribute, the value does not match the attribute data type, or the dictionary doesn't support this function.
|
||||
)" };
|
||||
|
||||
factory.registerFunction<FunctionDictGetNoType<DictionaryGetFunctionType::get>>(FunctionDocumentation{ .description=fmt::format(dict_get_description, "attribute’s data type") });
|
||||
factory.registerFunction<FunctionDictGetNoType<DictionaryGetFunctionType::getOrDefault>>(FunctionDocumentation{ .description=fmt::format(dict_get_or_default_description, "attribute’s data type") });
|
||||
factory.registerFunction<FunctionDictGetOrNull>(FunctionDocumentation{ .description=dict_get_or_null_description });
|
||||
factory.registerFunction<FunctionDictGetNoType<DictionaryGetFunctionType::getAll>>(FunctionDocumentation{ .description=dict_get_all_description });
|
||||
|
||||
factory.registerFunction<FunctionDictGetUInt8>(FunctionDocumentation{ .description=fmt::format(dict_get_description, "UInt8") });
|
||||
factory.registerFunction<FunctionDictGetUInt16>(FunctionDocumentation{ .description=fmt::format(dict_get_description, "UInt16") });
|
||||
|
@ -296,7 +296,8 @@ private:
|
||||
enum class DictionaryGetFunctionType
|
||||
{
|
||||
get,
|
||||
getOrDefault
|
||||
getOrDefault,
|
||||
getAll
|
||||
};
|
||||
|
||||
/// This variant of function derives the result type automatically.
|
||||
@ -304,7 +305,10 @@ template <DictionaryGetFunctionType dictionary_get_function_type>
|
||||
class FunctionDictGetNoType final : public IFunction
|
||||
{
|
||||
public:
|
||||
static constexpr auto name = dictionary_get_function_type == DictionaryGetFunctionType::get ? "dictGet" : "dictGetOrDefault";
|
||||
// Kind of gross but we need a static field called "name" for FunctionFactory::registerFunction, and this is the easiest way
|
||||
static constexpr auto name = (dictionary_get_function_type == DictionaryGetFunctionType::get)
|
||||
? "dictGet"
|
||||
: ((dictionary_get_function_type == DictionaryGetFunctionType::getOrDefault) ? "dictGetOrDefault" : "dictGetAll");
|
||||
|
||||
static FunctionPtr create(ContextPtr context)
|
||||
{
|
||||
@ -321,7 +325,13 @@ public:
|
||||
|
||||
bool useDefaultImplementationForConstants() const final { return true; }
|
||||
bool useDefaultImplementationForNulls() const final { return false; }
|
||||
ColumnNumbers getArgumentsThatAreAlwaysConstant() const final { return {0, 1}; }
|
||||
ColumnNumbers getArgumentsThatAreAlwaysConstant() const final
|
||||
{
|
||||
if constexpr (dictionary_get_function_type == DictionaryGetFunctionType::getAll)
|
||||
return {0, 1, 3};
|
||||
else
|
||||
return {0, 1};
|
||||
}
|
||||
|
||||
bool isDeterministic() const override { return false; }
|
||||
|
||||
@ -360,6 +370,15 @@ public:
|
||||
}
|
||||
|
||||
bool key_is_nullable = arguments[2].type->isNullable();
|
||||
if constexpr (dictionary_get_function_type == DictionaryGetFunctionType::getAll)
|
||||
{
|
||||
if (key_is_nullable)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Function {} does not support nullable keys", getName());
|
||||
|
||||
// Wrap all the attribute types in Array()
|
||||
for (auto it = attribute_types.begin(); it != attribute_types.end(); ++it)
|
||||
*it = std::make_shared<DataTypeArray>(*it);
|
||||
}
|
||||
if (attribute_types.size() > 1)
|
||||
{
|
||||
if (key_is_nullable)
|
||||
@ -424,6 +443,7 @@ public:
|
||||
}
|
||||
|
||||
Columns default_cols;
|
||||
size_t collect_values_limit = std::numeric_limits<size_t>::max();
|
||||
|
||||
if (dictionary_get_function_type == DictionaryGetFunctionType::getOrDefault)
|
||||
{
|
||||
@ -464,6 +484,20 @@ public:
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dictionary_get_function_type == DictionaryGetFunctionType::getAll && current_arguments_index < arguments.size())
|
||||
{
|
||||
auto limit_col = arguments[current_arguments_index].column;
|
||||
// The getUInt later attempts to cast and throws on a type mismatch, so skip actual type checking here
|
||||
if (!limit_col || !isColumnConst(*limit_col))
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Illegal type {} of fourth argument of function {}. Expected const unsigned integer.",
|
||||
arguments[current_arguments_index].type->getName(),
|
||||
getName());
|
||||
|
||||
collect_values_limit = limit_col->getUInt(0);
|
||||
++current_arguments_index;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < attribute_names.size(); ++i)
|
||||
default_cols.emplace_back(nullptr);
|
||||
}
|
||||
@ -549,7 +583,8 @@ public:
|
||||
attribute_type = attribute_types.front();
|
||||
}
|
||||
|
||||
auto result_column = executeDictionaryRequest(dictionary, attribute_names, key_columns, key_types, attribute_type, default_cols);
|
||||
auto result_column = executeDictionaryRequest(
|
||||
dictionary, attribute_names, key_columns, key_types, attribute_type, default_cols, collect_values_limit);
|
||||
|
||||
if (key_is_nullable)
|
||||
result_column = wrapInNullable(result_column, {arguments[2]}, result_type, input_rows_count);
|
||||
@ -565,7 +600,8 @@ private:
|
||||
const Columns & key_columns,
|
||||
const DataTypes & key_types,
|
||||
const DataTypePtr & result_type,
|
||||
const Columns & default_cols) const
|
||||
const Columns & default_cols,
|
||||
size_t collect_values_limit) const
|
||||
{
|
||||
ColumnPtr result;
|
||||
|
||||
@ -573,23 +609,31 @@ private:
|
||||
{
|
||||
const auto & result_tuple_type = assert_cast<const DataTypeTuple &>(*result_type);
|
||||
|
||||
Columns result_columns = dictionary->getColumns(
|
||||
attribute_names,
|
||||
result_tuple_type.getElements(),
|
||||
key_columns,
|
||||
key_types,
|
||||
default_cols);
|
||||
Columns result_columns;
|
||||
if constexpr (dictionary_get_function_type == DictionaryGetFunctionType::getAll)
|
||||
{
|
||||
result_columns = dictionary->getColumnsAllValues(
|
||||
attribute_names, result_tuple_type.getElements(), key_columns, key_types, default_cols, collect_values_limit);
|
||||
}
|
||||
else
|
||||
{
|
||||
result_columns
|
||||
= dictionary->getColumns(attribute_names, result_tuple_type.getElements(), key_columns, key_types, default_cols);
|
||||
}
|
||||
|
||||
result = ColumnTuple::create(std::move(result_columns));
|
||||
}
|
||||
else
|
||||
{
|
||||
result = dictionary->getColumn(
|
||||
attribute_names[0],
|
||||
result_type,
|
||||
key_columns,
|
||||
key_types,
|
||||
default_cols.front());
|
||||
if constexpr (dictionary_get_function_type == DictionaryGetFunctionType::getAll)
|
||||
{
|
||||
result = dictionary->getColumnAllValues(
|
||||
attribute_names[0], result_type, key_columns, key_types, default_cols.front(), collect_values_limit);
|
||||
}
|
||||
else
|
||||
{
|
||||
result = dictionary->getColumn(attribute_names[0], result_type, key_columns, key_types, default_cols.front());
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@ -1116,6 +1116,32 @@ public:
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
ColumnPtr getConstantResultForNonConstArguments(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const override
|
||||
{
|
||||
const ColumnWithTypeAndName & arg_cond = arguments[0];
|
||||
if (!arg_cond.column || !isColumnConst(*arg_cond.column))
|
||||
return {};
|
||||
|
||||
const ColumnConst * cond_const_col = checkAndGetColumnConst<ColumnVector<UInt8>>(arg_cond.column.get());
|
||||
if (!cond_const_col)
|
||||
return {};
|
||||
|
||||
bool condition_value = cond_const_col->getValue<UInt8>();
|
||||
|
||||
const ColumnWithTypeAndName & arg_then = arguments[1];
|
||||
const ColumnWithTypeAndName & arg_else = arguments[2];
|
||||
const ColumnWithTypeAndName & potential_const_column = condition_value ? arg_then : arg_else;
|
||||
|
||||
if (!potential_const_column.column || !isColumnConst(*potential_const_column.column))
|
||||
return {};
|
||||
|
||||
auto result = castColumn(potential_const_column, result_type);
|
||||
if (!isColumnConst(*result))
|
||||
return {};
|
||||
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
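The new getConstantResultForNonConstArguments() override above applies a simple folding rule: when the condition of if(cond, then, else) is a known constant, the whole call can be replaced by the selected branch, provided that branch is itself constant. A standalone illustration of just that rule follows; optionals of strings stand in for ClickHouse's const columns, and the function name is purely illustrative.

#include <iostream>
#include <optional>
#include <string>

// Returns the folded constant result, or nullopt if folding is not possible.
std::optional<std::string> foldConstantIf(std::optional<bool> const_cond,
                                          std::optional<std::string> const_then,
                                          std::optional<std::string> const_else)
{
    if (!const_cond)
        return std::nullopt;                 // condition is not constant: nothing to fold
    const auto & branch = *const_cond ? const_then : const_else;
    return branch;                           // empty if the selected branch is not constant
}

int main()
{
    auto folded = foldConstantIf(true, std::string("taken"), std::nullopt);
    std::cout << (folded ? *folded : std::string("not folded")) << '\n';      // prints "taken"

    auto not_folded = foldConstantIf(std::nullopt, std::string("a"), std::string("b"));
    std::cout << (not_folded ? *not_folded : std::string("not folded")) << '\n';
}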
@ -31,6 +31,7 @@ namespace ErrorCodes
|
||||
extern const int CANNOT_PARSE_QUOTED_STRING;
|
||||
extern const int CANNOT_PARSE_DATETIME;
|
||||
extern const int CANNOT_PARSE_DATE;
|
||||
extern const int CANNOT_PARSE_UUID;
|
||||
extern const int INCORRECT_DATA;
|
||||
extern const int ATTEMPT_TO_READ_AFTER_EOF;
|
||||
extern const int LOGICAL_ERROR;
|
||||
@ -46,48 +47,45 @@ inline void parseHex(IteratorSrc src, IteratorDst dst)
|
||||
dst[dst_pos] = unhex2(reinterpret_cast<const char *>(&src[src_pos]));
|
||||
}
|
||||
|
||||
void parseUUID(const UInt8 * src36, UInt8 * dst16)
|
||||
UUID parseUUID(std::span<const UInt8> src)
|
||||
{
|
||||
/// If string is not like UUID - implementation specific behaviour.
|
||||
UUID uuid;
|
||||
const auto * src_ptr = src.data();
|
||||
auto * dst = reinterpret_cast<UInt8 *>(&uuid);
|
||||
const auto size = src.size();
|
||||
|
||||
parseHex<4>(&src36[0], &dst16[0]);
|
||||
parseHex<2>(&src36[9], &dst16[4]);
|
||||
parseHex<2>(&src36[14], &dst16[6]);
|
||||
parseHex<2>(&src36[19], &dst16[8]);
|
||||
parseHex<6>(&src36[24], &dst16[10]);
|
||||
}
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
const std::reverse_iterator dst_it(dst + sizeof(UUID));
|
||||
#endif
|
||||
if (size == 36)
|
||||
{
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
parseHex<4>(src_ptr, dst_it + 8);
|
||||
parseHex<2>(src_ptr + 9, dst_it + 12);
|
||||
parseHex<2>(src_ptr + 14, dst_it + 14);
|
||||
parseHex<2>(src_ptr + 19, dst_it);
|
||||
parseHex<6>(src_ptr + 24, dst_it + 2);
|
||||
#else
|
||||
parseHex<4>(src_ptr, dst);
|
||||
parseHex<2>(src_ptr + 9, dst + 4);
|
||||
parseHex<2>(src_ptr + 14, dst + 6);
|
||||
parseHex<2>(src_ptr + 19, dst + 8);
|
||||
parseHex<6>(src_ptr + 24, dst + 10);
|
||||
#endif
|
||||
}
|
||||
else if (size == 32)
|
||||
{
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
parseHex<8>(src_ptr, dst_it + 8);
|
||||
parseHex<8>(src_ptr + 16, dst_it);
|
||||
#else
|
||||
parseHex<16>(src_ptr, dst);
|
||||
#endif
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::CANNOT_PARSE_UUID, "Unexpected length when trying to parse UUID ({})", size);
|
||||
|
||||
void parseUUIDWithoutSeparator(const UInt8 * src36, UInt8 * dst16)
|
||||
{
|
||||
/// If string is not like UUID - implementation specific behaviour.
|
||||
|
||||
parseHex<16>(&src36[0], &dst16[0]);
|
||||
}
|
||||
|
||||
/** Function used when byte ordering is important when parsing uuid
|
||||
* ex: When we create an UUID type
|
||||
*/
|
||||
void parseUUID(const UInt8 * src36, std::reverse_iterator<UInt8 *> dst16)
|
||||
{
|
||||
/// If string is not like UUID - implementation specific behaviour.
|
||||
|
||||
/// FIXME This code looks like trash.
|
||||
parseHex<4>(&src36[0], dst16 + 8);
|
||||
parseHex<2>(&src36[9], dst16 + 12);
|
||||
parseHex<2>(&src36[14], dst16 + 14);
|
||||
parseHex<2>(&src36[19], dst16);
|
||||
parseHex<6>(&src36[24], dst16 + 2);
|
||||
}
|
||||
|
||||
/** Function used when byte ordering is important when parsing uuid
|
||||
* ex: When we create an UUID type
|
||||
*/
|
||||
void parseUUIDWithoutSeparator(const UInt8 * src36, std::reverse_iterator<UInt8 *> dst16)
|
||||
{
|
||||
/// If string is not like UUID - implementation specific behaviour.
|
||||
|
||||
parseHex<8>(&src36[0], dst16 + 8);
|
||||
parseHex<8>(&src36[16], dst16);
|
||||
return uuid;
|
||||
}
|
||||
|
||||
void NO_INLINE throwAtAssertionFailed(const char * s, ReadBuffer & buf)
|
||||
|
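The reworked parseUUID() above dispatches on the input length: 36 characters for the canonical dashed form, 32 for the compact form, and an exception otherwise. A small self-contained sketch of that size dispatch is shown below; it produces the 16 bytes in textual (big-endian) order and deliberately omits the half-reversal that ClickHouse performs on little-endian hosts to match its in-memory UUID layout. The helper names are illustrative only.

#include <array>
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string_view>

static uint8_t unhexNibble(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    throw std::runtime_error("Invalid hex digit in UUID");
}

std::array<uint8_t, 16> parseUuidText(std::string_view src)
{
    std::array<uint8_t, 16> dst{};
    if (src.size() != 36 && src.size() != 32)
        throw std::runtime_error("Unexpected length when trying to parse UUID");

    size_t out = 0;
    for (size_t i = 0; i < src.size(); )
    {
        if (src[i] == '-')            // dashes are only present in the 36-character form
        {
            ++i;
            continue;
        }
        dst[out++] = static_cast<uint8_t>(unhexNibble(src[i]) << 4 | unhexNibble(src[i + 1]));
        i += 2;
    }
    return dst;
}

int main()
{
    for (std::string_view s : {"61f0c404-5cb3-11e7-907b-a6006ad3dba0",
                               "61f0c4045cb311e7907ba6006ad3dba0"})
    {
        auto bytes = parseUuidText(s);
        for (uint8_t b : bytes)
            std::printf("%02x", b);
        std::printf("\n");
    }
}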
@ -8,6 +8,7 @@
|
||||
#include <algorithm>
|
||||
#include <iterator>
|
||||
#include <bit>
|
||||
#include <span>
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
@ -623,12 +624,6 @@ struct NullOutput
|
||||
void push_back(char) {} /// NOLINT
|
||||
};
|
||||
|
||||
void parseUUID(const UInt8 * src36, UInt8 * dst16);
|
||||
void parseUUIDWithoutSeparator(const UInt8 * src36, UInt8 * dst16);
|
||||
void parseUUID(const UInt8 * src36, std::reverse_iterator<UInt8 *> dst16);
|
||||
void parseUUIDWithoutSeparator(const UInt8 * src36, std::reverse_iterator<UInt8 *> dst16);
|
||||
|
||||
|
||||
template <typename ReturnType>
|
||||
ReturnType readDateTextFallback(LocalDate & date, ReadBuffer & buf);
|
||||
|
||||
@ -770,6 +765,8 @@ inline bool tryReadDateText(ExtendedDayNum & date, ReadBuffer & buf)
|
||||
return readDateTextImpl<bool>(date, buf);
|
||||
}
|
||||
|
||||
UUID parseUUID(std::span<const UInt8> src);
|
||||
|
||||
template <typename ReturnType = void>
|
||||
inline ReturnType readUUIDTextImpl(UUID & uuid, ReadBuffer & buf)
|
||||
{
|
||||
@ -797,12 +794,9 @@ inline ReturnType readUUIDTextImpl(UUID & uuid, ReadBuffer & buf)
|
||||
return ReturnType(false);
|
||||
}
|
||||
}
|
||||
|
||||
parseUUID(reinterpret_cast<const UInt8 *>(s), std::reverse_iterator<UInt8 *>(reinterpret_cast<UInt8 *>(&uuid) + 16));
|
||||
}
|
||||
else
|
||||
parseUUIDWithoutSeparator(reinterpret_cast<const UInt8 *>(s), std::reverse_iterator<UInt8 *>(reinterpret_cast<UInt8 *>(&uuid) + 16));
|
||||
|
||||
uuid = parseUUID({reinterpret_cast<const UInt8 *>(s), size});
|
||||
return ReturnType(true);
|
||||
}
|
||||
else
|
||||
|
@ -99,6 +99,8 @@ struct ReadSettings
|
||||
bool read_from_filesystem_cache_if_exists_otherwise_bypass_cache = false;
|
||||
bool enable_filesystem_cache_log = false;
|
||||
bool is_file_cache_persistent = false; /// Some files can be made non-evictable.
|
||||
/// Don't populate cache when the read is not part of query execution (e.g. background thread).
|
||||
bool avoid_readthrough_cache_outside_query_context = true;
|
||||
|
||||
size_t filesystem_cache_max_download_size = (128UL * 1024 * 1024 * 1024);
|
||||
bool skip_download_if_exceeds_query_cache = true;
|
||||
|
@ -9,8 +9,12 @@ namespace CurrentMetrics
|
||||
extern const Metric IOThreadsActive;
|
||||
extern const Metric BackupsIOThreads;
|
||||
extern const Metric BackupsIOThreadsActive;
|
||||
extern const Metric OutdatedPartsLoadingThreads;
|
||||
extern const Metric OutdatedPartsLoadingThreadsActive;
|
||||
extern const Metric MergeTreePartsLoaderThreads;
|
||||
extern const Metric MergeTreePartsLoaderThreadsActive;
|
||||
extern const Metric MergeTreePartsCleanerThreads;
|
||||
extern const Metric MergeTreePartsCleanerThreadsActive;
|
||||
extern const Metric MergeTreeOutdatedPartsLoaderThreads;
|
||||
extern const Metric MergeTreeOutdatedPartsLoaderThreadsActive;
|
||||
}
|
||||
|
||||
namespace DB
|
||||
@ -21,88 +25,117 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
std::unique_ptr<ThreadPool> IOThreadPool::instance;
|
||||
|
||||
void IOThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
StaticThreadPool::StaticThreadPool(
|
||||
const String & name_,
|
||||
CurrentMetrics::Metric threads_metric_,
|
||||
CurrentMetrics::Metric threads_active_metric_)
|
||||
: name(name_)
|
||||
, threads_metric(threads_metric_)
|
||||
, threads_active_metric(threads_active_metric_)
|
||||
{
|
||||
}
|
||||
|
||||
void StaticThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
{
|
||||
if (instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The IO thread pool is initialized twice");
|
||||
}
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is initialized twice", name);
|
||||
|
||||
/// By default enabling "turbo mode" won't affect the number of threads anyhow
|
||||
max_threads_turbo = max_threads;
|
||||
max_threads_normal = max_threads;
|
||||
instance = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::IOThreads,
|
||||
CurrentMetrics::IOThreadsActive,
|
||||
threads_metric,
|
||||
threads_active_metric,
|
||||
max_threads,
|
||||
max_free_threads,
|
||||
queue_size,
|
||||
/* shutdown_on_exception= */ false);
|
||||
}
|
||||
|
||||
ThreadPool & IOThreadPool::get()
|
||||
void StaticThreadPool::reloadConfiguration(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
{
|
||||
if (!instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The IO thread pool is not initialized");
|
||||
}
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name);
|
||||
|
||||
instance->setMaxThreads(turbo_mode_enabled > 0 ? max_threads_turbo : max_threads);
|
||||
instance->setMaxFreeThreads(max_free_threads);
|
||||
instance->setQueueSize(queue_size);
|
||||
}
|
||||
|
||||
|
||||
ThreadPool & StaticThreadPool::get()
|
||||
{
|
||||
if (!instance)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name);
|
||||
|
||||
return *instance;
|
||||
}
|
||||
|
||||
std::unique_ptr<ThreadPool> BackupsIOThreadPool::instance;
|
||||
|
||||
void BackupsIOThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
{
|
||||
if (instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The BackupsIO thread pool is initialized twice");
|
||||
}
|
||||
|
||||
instance = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::BackupsIOThreads,
|
||||
CurrentMetrics::BackupsIOThreadsActive,
|
||||
max_threads,
|
||||
max_free_threads,
|
||||
queue_size,
|
||||
/* shutdown_on_exception= */ false);
|
||||
}
|
||||
|
||||
ThreadPool & BackupsIOThreadPool::get()
|
||||
void StaticThreadPool::enableTurboMode()
|
||||
{
|
||||
if (!instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The BackupsIO thread pool is not initialized");
|
||||
}
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name);
|
||||
|
||||
return *instance;
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
++turbo_mode_enabled;
|
||||
if (turbo_mode_enabled == 1)
|
||||
instance->setMaxThreads(max_threads_turbo);
|
||||
}
|
||||
|
||||
std::unique_ptr<ThreadPool> OutdatedPartsLoadingThreadPool::instance;
|
||||
|
||||
void OutdatedPartsLoadingThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size)
|
||||
{
|
||||
if (instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The PartsLoadingThreadPool thread pool is initialized twice");
|
||||
}
|
||||
|
||||
instance = std::make_unique<ThreadPool>(
|
||||
CurrentMetrics::OutdatedPartsLoadingThreads,
|
||||
CurrentMetrics::OutdatedPartsLoadingThreadsActive,
|
||||
max_threads,
|
||||
max_free_threads,
|
||||
queue_size,
|
||||
/* shutdown_on_exception= */ false);
|
||||
}
|
||||
|
||||
ThreadPool & OutdatedPartsLoadingThreadPool::get()
|
||||
void StaticThreadPool::disableTurboMode()
|
||||
{
|
||||
if (!instance)
|
||||
{
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The PartsLoadingThreadPool thread pool is not initialized");
|
||||
}
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name);
|
||||
|
||||
return *instance;
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
--turbo_mode_enabled;
|
||||
if (turbo_mode_enabled == 0)
|
||||
instance->setMaxThreads(max_threads_normal);
|
||||
}
|
||||
|
||||
void StaticThreadPool::setMaxTurboThreads(size_t max_threads_turbo_)
|
||||
{
|
||||
if (!instance)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "The {} is not initialized", name);
|
||||
|
||||
std::lock_guard lock(mutex);
|
||||
|
||||
max_threads_turbo = max_threads_turbo_;
|
||||
if (turbo_mode_enabled > 0)
|
||||
instance->setMaxThreads(max_threads_turbo);
|
||||
}
|
||||
|
||||
StaticThreadPool & getIOThreadPool()
|
||||
{
|
||||
static StaticThreadPool instance("IOThreadPool", CurrentMetrics::IOThreads, CurrentMetrics::IOThreadsActive);
|
||||
return instance;
|
||||
}
|
||||
|
||||
StaticThreadPool & getBackupsIOThreadPool()
|
||||
{
|
||||
static StaticThreadPool instance("BackupsIOThreadPool", CurrentMetrics::BackupsIOThreads, CurrentMetrics::BackupsIOThreadsActive);
|
||||
return instance;
|
||||
}
|
||||
|
||||
StaticThreadPool & getActivePartsLoadingThreadPool()
|
||||
{
|
||||
static StaticThreadPool instance("MergeTreePartsLoaderThreadPool", CurrentMetrics::MergeTreePartsLoaderThreads, CurrentMetrics::MergeTreePartsLoaderThreadsActive);
|
||||
return instance;
|
||||
}
|
||||
|
||||
StaticThreadPool & getPartsCleaningThreadPool()
|
||||
{
|
||||
static StaticThreadPool instance("MergeTreePartsCleanerThreadPool", CurrentMetrics::MergeTreePartsCleanerThreads, CurrentMetrics::MergeTreePartsCleanerThreadsActive);
|
||||
return instance;
|
||||
}
|
||||
|
||||
StaticThreadPool & getOutdatedPartsLoadingThreadPool()
|
||||
{
|
||||
static StaticThreadPool instance("MergeTreeOutdatedPartsLoaderThreadPool", CurrentMetrics::MergeTreeOutdatedPartsLoaderThreads, CurrentMetrics::MergeTreeOutdatedPartsLoaderThreadsActive);
|
||||
return instance;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,48 +1,64 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/types.h>
|
||||
#include <Common/ThreadPool_fwd.h>
|
||||
#include <Common/CurrentMetrics.h>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/*
|
||||
* ThreadPool used for the IO.
|
||||
*/
|
||||
class IOThreadPool
|
||||
class StaticThreadPool
|
||||
{
|
||||
static std::unique_ptr<ThreadPool> instance;
|
||||
|
||||
public:
|
||||
static void initialize(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
static ThreadPool & get();
|
||||
StaticThreadPool(
|
||||
const String & name_,
|
||||
CurrentMetrics::Metric threads_metric_,
|
||||
CurrentMetrics::Metric threads_active_metric_);
|
||||
|
||||
ThreadPool & get();
|
||||
|
||||
void initialize(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
void reloadConfiguration(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
|
||||
/// At runtime we can increase the number of threads up the specified limit
|
||||
/// This is needed to utilize as much a possible resources to accomplish some task.
|
||||
void setMaxTurboThreads(size_t max_threads_turbo_);
|
||||
void enableTurboMode();
|
||||
void disableTurboMode();
|
||||
|
||||
private:
|
||||
const String name;
|
||||
const CurrentMetrics::Metric threads_metric;
|
||||
const CurrentMetrics::Metric threads_active_metric;
|
||||
|
||||
std::unique_ptr<ThreadPool> instance;
|
||||
std::mutex mutex;
|
||||
size_t max_threads_turbo = 0;
|
||||
size_t max_threads_normal = 0;
|
||||
/// If this counter is > 0 - this specific mode is enabled
|
||||
size_t turbo_mode_enabled = 0;
|
||||
};
|
||||
|
||||
/// ThreadPool used for the IO.
|
||||
StaticThreadPool & getIOThreadPool();
|
||||
|
||||
/*
|
||||
* ThreadPool used for the Backup IO.
|
||||
*/
|
||||
class BackupsIOThreadPool
|
||||
{
|
||||
static std::unique_ptr<ThreadPool> instance;
|
||||
/// ThreadPool used for the Backup IO.
|
||||
StaticThreadPool & getBackupsIOThreadPool();
|
||||
|
||||
public:
|
||||
static void initialize(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
static ThreadPool & get();
|
||||
};
|
||||
/// ThreadPool used for the loading of Active data parts for MergeTree tables.
|
||||
StaticThreadPool & getActivePartsLoadingThreadPool();
|
||||
|
||||
/// ThreadPool used for deleting data parts for MergeTree tables.
|
||||
StaticThreadPool & getPartsCleaningThreadPool();
|
||||
|
||||
/*
|
||||
* ThreadPool used for the loading of Outdated data parts for MergeTree tables.
|
||||
*/
|
||||
class OutdatedPartsLoadingThreadPool
|
||||
{
|
||||
static std::unique_ptr<ThreadPool> instance;
|
||||
|
||||
public:
|
||||
static void initialize(size_t max_threads, size_t max_free_threads, size_t queue_size);
|
||||
static ThreadPool & get();
|
||||
};
|
||||
/// This ThreadPool is used for the loading of Outdated data parts for MergeTree tables.
|
||||
/// Normally we will just load Outdated data parts concurrently in background, but in
|
||||
/// case when we need to synchronously wait for the loading to be finished, we can increase
|
||||
/// the number of threads by calling enableTurboMode() :-)
|
||||
StaticThreadPool & getOutdatedPartsLoadingThreadPool();
|
||||
|
||||
}
|
||||
|
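The refactoring above replaces per-pool singletons with one StaticThreadPool type whose "turbo mode" is reference counted: the first caller that enables it raises the thread limit, the last caller that disables it restores the normal limit. The following standalone analogue shows only that bookkeeping; setMaxThreads() just prints here, whereas in ClickHouse it forwards to the underlying ThreadPool, and the class name is illustrative.

#include <cstddef>
#include <iostream>
#include <mutex>

class TurboLimiter
{
public:
    TurboLimiter(size_t normal, size_t turbo) : max_threads_normal(normal), max_threads_turbo(turbo) {}

    void enableTurboMode()
    {
        std::lock_guard lock(mutex);
        if (++turbo_mode_enabled == 1)          // first requester boosts the limit
            setMaxThreads(max_threads_turbo);
    }

    void disableTurboMode()
    {
        std::lock_guard lock(mutex);
        if (--turbo_mode_enabled == 0)          // last requester restores the normal limit
            setMaxThreads(max_threads_normal);
    }

private:
    void setMaxThreads(size_t n) { std::cout << "max threads -> " << n << '\n'; }

    std::mutex mutex;
    size_t max_threads_normal;
    size_t max_threads_turbo;
    size_t turbo_mode_enabled = 0;
};

int main()
{
    TurboLimiter pool(/*normal=*/4, /*turbo=*/32);
    pool.enableTurboMode();   // e.g. a synchronous wait for Outdated parts loading starts
    pool.enableTurboMode();   // a second waiter does not change the limit again
    pool.disableTurboMode();
    pool.disableTurboMode();  // back to the normal limit once nobody needs turbo mode
}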
@ -20,20 +20,35 @@ void formatHex(IteratorSrc src, IteratorDst dst, size_t num_bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/** Function used when byte ordering is important when parsing uuid
|
||||
* ex: When we create an UUID type
|
||||
*/
|
||||
void formatUUID(std::reverse_iterator<const UInt8 *> src16, UInt8 * dst36)
|
||||
std::array<char, 36> formatUUID(const UUID & uuid)
|
||||
{
|
||||
formatHex(src16 + 8, &dst36[0], 4);
|
||||
dst36[8] = '-';
|
||||
formatHex(src16 + 12, &dst36[9], 2);
|
||||
dst36[13] = '-';
|
||||
formatHex(src16 + 14, &dst36[14], 2);
|
||||
dst36[18] = '-';
|
||||
formatHex(src16, &dst36[19], 2);
|
||||
dst36[23] = '-';
|
||||
formatHex(src16 + 2, &dst36[24], 6);
|
||||
std::array<char, 36> dst;
|
||||
const auto * src_ptr = reinterpret_cast<const UInt8 *>(&uuid);
|
||||
auto * dst_ptr = dst.data();
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
const std::reverse_iterator src_it(src_ptr + 16);
|
||||
formatHex(src_it + 8, dst_ptr, 4);
|
||||
dst[8] = '-';
|
||||
formatHex(src_it + 12, dst_ptr + 9, 2);
|
||||
dst[13] = '-';
|
||||
formatHex(src_it + 14, dst_ptr + 14, 2);
|
||||
dst[18] = '-';
|
||||
formatHex(src_it, dst_ptr + 19, 2);
|
||||
dst[23] = '-';
|
||||
formatHex(src_it + 2, dst_ptr + 24, 6);
|
||||
#else
|
||||
formatHex(src_ptr, dst_ptr, 4);
|
||||
dst[8] = '-';
|
||||
formatHex(src_ptr + 4, dst_ptr + 9, 2);
|
||||
dst[13] = '-';
|
||||
formatHex(src_ptr + 6, dst_ptr + 14, 2);
|
||||
dst[18] = '-';
|
||||
formatHex(src_ptr + 8, dst_ptr + 19, 2);
|
||||
dst[23] = '-';
|
||||
formatHex(src_ptr + 10, dst_ptr + 24, 6);
|
||||
#endif
|
||||
|
||||
return dst;
|
||||
}
|
||||
|
||||
void writeIPv4Text(const IPv4 & ip, WriteBuffer & buf)
|
||||
|
@ -625,13 +625,15 @@ inline void writeXMLStringForTextElement(std::string_view s, WriteBuffer & buf)
|
||||
writeXMLStringForTextElement(s.data(), s.data() + s.size(), buf);
|
||||
}
|
||||
|
||||
void formatUUID(std::reverse_iterator<const UInt8 *> src16, UInt8 * dst36);
|
||||
/// @brief Serialize `uuid` into an array of characters in big-endian byte order.
|
||||
/// @param uuid UUID to serialize.
|
||||
/// @return Array of characters in big-endian byte order.
|
||||
std::array<char, 36> formatUUID(const UUID & uuid);
|
||||
|
||||
inline void writeUUIDText(const UUID & uuid, WriteBuffer & buf)
|
||||
{
|
||||
char s[36];
|
||||
formatUUID(std::reverse_iterator<const UInt8 *>(reinterpret_cast<const UInt8 *>(&uuid) + 16), reinterpret_cast<UInt8 *>(s));
|
||||
buf.write(s, sizeof(s));
|
||||
const auto serialized_uuid = formatUUID(uuid);
|
||||
buf.write(serialized_uuid.data(), serialized_uuid.size());
|
||||
}
|
||||
|
||||
void writeIPv4Text(const IPv4 & ip, WriteBuffer & buf);
|
||||
|
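As a counterpart to the formatUUID() change above, the sketch below turns 16 bytes (in textual/big-endian order) back into the canonical 36-character form with dashes at positions 8, 13, 18 and 23, which is the layout writeUUIDText() emits. The endianness shuffling that formatUUID() performs for ClickHouse's in-memory UUID representation is left out, and the function name is a stand-in.

#include <array>
#include <cstdint>
#include <cstdio>

std::array<char, 36> formatUuidBytes(const std::array<uint8_t, 16> & src)
{
    static const char hex[] = "0123456789abcdef";
    std::array<char, 36> dst;
    size_t out = 0;
    for (size_t i = 0; i < 16; ++i)
    {
        if (i == 4 || i == 6 || i == 8 || i == 10)
            dst[out++] = '-';                    // dash boundaries of the canonical form
        dst[out++] = hex[src[i] >> 4];
        dst[out++] = hex[src[i] & 0x0F];
    }
    return dst;
}

int main()
{
    std::array<uint8_t, 16> bytes{0x61, 0xf0, 0xc4, 0x04, 0x5c, 0xb3, 0x11, 0xe7,
                                  0x90, 0x7b, 0xa6, 0x00, 0x6a, 0xd3, 0xdb, 0xa0};
    auto text = formatUuidBytes(bytes);
    std::fwrite(text.data(), 1, text.size(), stdout);
    std::fputc('\n', stdout);
}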
@ -149,7 +149,7 @@ FileSegments FileCache::getImpl(const LockedKey & locked_key, const FileSegment:
|
||||
auto add_to_result = [&](const FileSegmentMetadata & file_segment_metadata)
|
||||
{
|
||||
FileSegmentPtr file_segment;
|
||||
if (file_segment_metadata.valid())
|
||||
if (!file_segment_metadata.evicting())
|
||||
{
|
||||
file_segment = file_segment_metadata.file_segment;
|
||||
if (file_segment->isDownloaded())
|
||||
|
@ -85,7 +85,7 @@ public:
|
||||
EMPTY,
|
||||
/**
|
||||
* A newly created file segment never has DOWNLOADING state until call to getOrSetDownloader
|
||||
* because each cache user might acquire multiple file segments and reads them one by one,
|
||||
* because each cache user might acquire multiple file segments and read them one by one,
|
||||
* so only user which actually needs to read this segment earlier than others - becomes a downloader.
|
||||
*/
|
||||
DOWNLOADING,
|
||||
|
@ -85,6 +85,7 @@ public:
|
||||
|
||||
virtual void removeAll(const CacheGuard::Lock &) = 0;
|
||||
|
||||
/// From lowest to highest priority.
|
||||
virtual void iterate(IterateFunc && func, const CacheGuard::Lock &) = 0;
|
||||
|
||||
private:
|
||||
|
@ -346,6 +346,16 @@ void LockedKey::removeAllReleasable()
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
else if (it->second->evicting())
|
||||
{
|
||||
/// File segment is currently a removal candidate,
|
||||
/// we do not know if it will be removed or not yet,
|
||||
/// but its size is currently accounted as potentially removed,
|
||||
/// so if we remove file segment now, we break the freeable_count
|
||||
/// calculation in tryReserve.
|
||||
++it;
|
||||
continue;
|
||||
}
|
||||
|
||||
auto file_segment = it->second->file_segment;
|
||||
it = removeFileSegment(file_segment->offset(), file_segment->lock());
|
||||
|
@ -22,7 +22,7 @@ struct FileSegmentMetadata : private boost::noncopyable
|
||||
|
||||
size_t size() const;
|
||||
|
||||
bool valid() const { return !removal_candidate.load(); }
|
||||
bool evicting() const { return removal_candidate.load(); }
|
||||
|
||||
Priority::Iterator getQueueIterator() const { return file_segment->getQueueIterator(); }
|
||||
|
||||
|
@ -833,6 +833,19 @@ InterpreterSelectQuery::InterpreterSelectQuery(
|
||||
need_analyze_again = true;
|
||||
}
|
||||
|
||||
if (can_analyze_again
|
||||
&& settings.max_parallel_replicas > 1
|
||||
&& settings.allow_experimental_parallel_reading_from_replicas > 0
|
||||
&& settings.parallel_replicas_custom_key.value.empty()
|
||||
&& getTrivialCount(0).has_value())
|
||||
{
|
||||
/// The query could use trivial count if it didn't use parallel replicas, so let's disable it and reanalyze
|
||||
context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0));
|
||||
context->setSetting("max_parallel_replicas", UInt64{0});
|
||||
need_analyze_again = true;
|
||||
LOG_TRACE(log, "Disabling parallel replicas to be able to use a trivial count optimization");
|
||||
}
|
||||
|
||||
if (need_analyze_again)
|
||||
{
|
||||
size_t current_query_analyze_count = context->getQueryContext()->kitchen_sink.analyze_counter.load();
|
||||
@ -2254,79 +2267,84 @@ void InterpreterSelectQuery::addPrewhereAliasActions()
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum processing_stage, QueryPlan & query_plan)
|
||||
/// Based on the query analysis, check if optimizing the trivial count to use totalRows is possible
|
||||
std::optional<UInt64> InterpreterSelectQuery::getTrivialCount(UInt64 max_parallel_replicas)
|
||||
{
|
||||
auto & query = getSelectQuery();
|
||||
const Settings & settings = context->getSettingsRef();
|
||||
|
||||
/// Optimization for trivial query like SELECT count() FROM table.
|
||||
bool optimize_trivial_count =
|
||||
syntax_analyzer_result->optimize_trivial_count
|
||||
&& (settings.max_parallel_replicas <= 1)
|
||||
&& (max_parallel_replicas <= 1)
|
||||
&& !settings.allow_experimental_query_deduplication
|
||||
&& !settings.empty_result_for_aggregation_by_empty_set
|
||||
&& storage
|
||||
&& storage->getName() != "MaterializedMySQL"
|
||||
&& !storage->hasLightweightDeletedMask()
|
||||
&& query_info.filter_asts.empty()
|
||||
&& processing_stage == QueryProcessingStage::FetchColumns
|
||||
&& query_analyzer->hasAggregation()
|
||||
&& (query_analyzer->aggregates().size() == 1)
|
||||
&& typeid_cast<const AggregateFunctionCount *>(query_analyzer->aggregates()[0].function.get());
|
||||
|
||||
if (optimize_trivial_count)
|
||||
if (!optimize_trivial_count)
|
||||
return {};
|
||||
|
||||
auto & query = getSelectQuery();
|
||||
if (!query.prewhere() && !query.where() && !context->getCurrentTransaction())
|
||||
{
|
||||
return storage->totalRows(settings);
|
||||
}
|
||||
else
|
||||
{
|
||||
// It's possible to optimize count() given only partition predicates
|
||||
SelectQueryInfo temp_query_info;
|
||||
temp_query_info.query = query_ptr;
|
||||
temp_query_info.syntax_analyzer_result = syntax_analyzer_result;
|
||||
temp_query_info.prepared_sets = query_analyzer->getPreparedSets();
|
||||
|
||||
return storage->totalRowsByPartitionPredicate(temp_query_info, context);
|
||||
}
|
||||
}
|
||||
|
||||
void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum processing_stage, QueryPlan & query_plan)
|
||||
{
|
||||
auto & query = getSelectQuery();
|
||||
const Settings & settings = context->getSettingsRef();
|
||||
std::optional<UInt64> num_rows;
|
||||
|
||||
/// Optimization for trivial query like SELECT count() FROM table.
|
||||
if (processing_stage == QueryProcessingStage::FetchColumns && (num_rows = getTrivialCount(settings.max_parallel_replicas)))
|
||||
{
|
||||
const auto & desc = query_analyzer->aggregates()[0];
|
||||
const auto & func = desc.function;
|
||||
std::optional<UInt64> num_rows{};
|
||||
const AggregateFunctionCount & agg_count = static_cast<const AggregateFunctionCount &>(*func);
|
||||
|
||||
if (!query.prewhere() && !query.where() && !context->getCurrentTransaction())
|
||||
{
|
||||
num_rows = storage->totalRows(settings);
|
||||
}
|
||||
else // It's possible to optimize count() given only partition predicates
|
||||
{
|
||||
SelectQueryInfo temp_query_info;
|
||||
temp_query_info.query = query_ptr;
|
||||
temp_query_info.syntax_analyzer_result = syntax_analyzer_result;
|
||||
temp_query_info.prepared_sets = query_analyzer->getPreparedSets();
|
||||
/// We will process it up to "WithMergeableState".
|
||||
std::vector<char> state(agg_count.sizeOfData());
|
||||
AggregateDataPtr place = state.data();
|
||||
|
||||
num_rows = storage->totalRowsByPartitionPredicate(temp_query_info, context);
|
||||
}
|
||||
agg_count.create(place);
|
||||
SCOPE_EXIT_MEMORY_SAFE(agg_count.destroy(place));
|
||||
|
||||
if (num_rows)
|
||||
{
|
||||
const AggregateFunctionCount & agg_count = static_cast<const AggregateFunctionCount &>(*func);
|
||||
agg_count.set(place, *num_rows);
|
||||
|
||||
/// We will process it up to "WithMergeableState".
|
||||
std::vector<char> state(agg_count.sizeOfData());
|
||||
AggregateDataPtr place = state.data();
|
||||
auto column = ColumnAggregateFunction::create(func);
|
||||
column->insertFrom(place);
|
||||
|
||||
agg_count.create(place);
|
||||
SCOPE_EXIT_MEMORY_SAFE(agg_count.destroy(place));
|
||||
Block header = analysis_result.before_aggregation->getResultColumns();
|
||||
size_t arguments_size = desc.argument_names.size();
|
||||
DataTypes argument_types(arguments_size);
|
||||
for (size_t j = 0; j < arguments_size; ++j)
|
||||
argument_types[j] = header.getByName(desc.argument_names[j]).type;
|
||||
|
||||
agg_count.set(place, *num_rows);
|
||||
Block block_with_count{
|
||||
{std::move(column), std::make_shared<DataTypeAggregateFunction>(func, argument_types, desc.parameters), desc.column_name}};
|
||||
|
||||
auto column = ColumnAggregateFunction::create(func);
|
||||
column->insertFrom(place);
|
||||
|
||||
Block header = analysis_result.before_aggregation->getResultColumns();
|
||||
size_t arguments_size = desc.argument_names.size();
|
||||
DataTypes argument_types(arguments_size);
|
||||
for (size_t j = 0; j < arguments_size; ++j)
|
||||
argument_types[j] = header.getByName(desc.argument_names[j]).type;
|
||||
|
||||
Block block_with_count{
|
||||
{std::move(column), std::make_shared<DataTypeAggregateFunction>(func, argument_types, desc.parameters), desc.column_name}};
|
||||
|
||||
auto source = std::make_shared<SourceFromSingleChunk>(block_with_count);
|
||||
auto prepared_count = std::make_unique<ReadFromPreparedSource>(Pipe(std::move(source)));
|
||||
prepared_count->setStepDescription("Optimized trivial count");
|
||||
query_plan.addStep(std::move(prepared_count));
|
||||
from_stage = QueryProcessingStage::WithMergeableState;
|
||||
analysis_result.first_stage = false;
|
||||
return;
|
||||
}
|
||||
auto source = std::make_shared<SourceFromSingleChunk>(block_with_count);
|
||||
auto prepared_count = std::make_unique<ReadFromPreparedSource>(Pipe(std::move(source)));
|
||||
prepared_count->setStepDescription("Optimized trivial count");
|
||||
query_plan.addStep(std::move(prepared_count));
|
||||
from_stage = QueryProcessingStage::WithMergeableState;
|
||||
analysis_result.first_stage = false;
|
||||
return;
|
||||
}
|
||||
|
||||
/// Limitation on the number of columns to read.
|
||||
|
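The extracted getTrivialCount() above gates the optimization on the shape of the query (a single count() aggregate, no WHERE/PREWHERE, no transaction, no parallel replicas) and then asks the storage for a metadata-level row count. A toy model of that gating logic is sketched below; all types and names here are illustrative, not ClickHouse's real interfaces.

#include <cstdint>
#include <iostream>
#include <optional>

struct QueryShape
{
    bool single_count_aggregate = false;   // exactly one aggregate and it is count()
    bool has_where = false;
    bool has_prewhere = false;
    bool in_transaction = false;
};

struct StorageMetadata
{
    std::optional<uint64_t> total_rows;    // what a storage's totalRows() would return
};

std::optional<uint64_t> getTrivialCount(const QueryShape & query, const StorageMetadata & storage, unsigned max_parallel_replicas)
{
    if (!query.single_count_aggregate || max_parallel_replicas > 1)
        return std::nullopt;
    if (query.has_where || query.has_prewhere || query.in_transaction)
        return std::nullopt;               // a filter would require reading the data
    return storage.total_rows;             // may still be empty if the engine cannot tell
}

int main()
{
    QueryShape query;
    query.single_count_aggregate = true;

    StorageMetadata storage;
    storage.total_rows = 1000000;

    if (auto rows = getTrivialCount(query, storage, /*max_parallel_replicas=*/1))
        std::cout << "answered from metadata: " << *rows << " rows\n";
    else
        std::cout << "falling back to a full read\n";
}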
@ -1,6 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
|
||||
#include <Access/EnabledRowPolicies.h>
|
||||
#include <Core/QueryProcessingStage.h>
|
||||
@ -187,6 +188,7 @@ private:
|
||||
void executeExtremes(QueryPlan & query_plan);
|
||||
void executeSubqueriesInSetsAndJoins(QueryPlan & query_plan);
|
||||
bool autoFinalOnQuery(ASTSelectQuery & select_query);
|
||||
std::optional<UInt64> getTrivialCount(UInt64 max_parallel_replicas);
|
||||
|
||||
enum class Modificator
|
||||
{
|
||||
|
@ -1,144 +0,0 @@
|
||||
#include <Interpreters/OptimizeDateFilterVisitor.h>
|
||||
|
||||
#include <Common/DateLUT.h>
|
||||
#include <Common/DateLUTImpl.h>
|
||||
#include <Parsers/ASTIdentifier.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
ASTPtr generateOptimizedDateFilterAST(const String & comparator, const String & converter, const String & column, UInt64 compare_to)
|
||||
{
|
||||
const DateLUTImpl & date_lut = DateLUT::instance();
|
||||
|
||||
String start_date;
|
||||
String end_date;
|
||||
|
||||
if (converter == "toYear")
|
||||
{
|
||||
UInt64 year = compare_to;
|
||||
start_date = date_lut.dateToString(date_lut.makeDayNum(year, 1, 1));
|
||||
end_date = date_lut.dateToString(date_lut.makeDayNum(year, 12, 31));
|
||||
}
|
||||
else if (converter == "toYYYYMM")
|
||||
{
|
||||
UInt64 year = compare_to / 100;
|
||||
UInt64 month = compare_to % 100;
|
||||
|
||||
if (month == 0 || month > 12) return {};
|
||||
|
||||
static constexpr UInt8 days_of_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
|
||||
|
||||
bool leap_year = (year & 3) == 0 && (year % 100 || (year % 400 == 0 && year));
|
||||
|
||||
start_date = date_lut.dateToString(date_lut.makeDayNum(year, month, 1));
|
||||
end_date = date_lut.dateToString(date_lut.makeDayNum(year, month, days_of_month[month - 1] + (leap_year && month == 2)));
|
||||
}
|
||||
else
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
if (comparator == "equals")
|
||||
{
|
||||
return makeASTFunction("and",
|
||||
makeASTFunction("greaterOrEquals",
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(start_date)
|
||||
),
|
||||
makeASTFunction("lessOrEquals",
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(end_date)
|
||||
)
|
||||
);
|
||||
}
|
||||
else if (comparator == "notEquals")
|
||||
{
|
||||
return makeASTFunction("or",
|
||||
makeASTFunction("less",
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(start_date)
|
||||
),
|
||||
makeASTFunction("greater",
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(end_date)
|
||||
)
|
||||
);
|
||||
}
|
||||
else if (comparator == "less" || comparator == "greaterOrEquals")
|
||||
{
|
||||
return makeASTFunction(comparator,
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(start_date)
|
||||
);
|
||||
}
|
||||
else
|
||||
{
|
||||
return makeASTFunction(comparator,
|
||||
std::make_shared<ASTIdentifier>(column),
|
||||
std::make_shared<ASTLiteral>(end_date)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
bool rewritePredicateInPlace(ASTFunction & function, ASTPtr & ast)
|
||||
{
|
||||
const static std::unordered_map<String, String> swap_relations = {
|
||||
{"equals", "equals"},
|
||||
{"notEquals", "notEquals"},
|
||||
{"less", "greater"},
|
||||
{"greater", "less"},
|
||||
{"lessOrEquals", "greaterOrEquals"},
|
||||
{"greaterOrEquals", "lessOrEquals"},
|
||||
};
|
||||
|
||||
if (!swap_relations.contains(function.name)) return false;
|
||||
|
||||
if (!function.arguments || function.arguments->children.size() != 2) return false;
|
||||
|
||||
size_t func_id = function.arguments->children.size();
|
||||
|
||||
for (size_t i = 0; i < function.arguments->children.size(); i++)
|
||||
{
|
||||
if (const auto * func = function.arguments->children[i]->as<ASTFunction>(); func)
|
||||
{
|
||||
if (func->name == "toYear" || func->name == "toYYYYMM")
|
||||
{
|
||||
func_id = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (func_id == function.arguments->children.size()) return false;
|
||||
|
||||
size_t literal_id = 1 - func_id;
|
||||
const auto * literal = function.arguments->children[literal_id]->as<ASTLiteral>();
|
||||
|
||||
if (!literal || literal->value.getType() != Field::Types::UInt64) return false;
|
||||
|
||||
UInt64 compare_to = literal->value.get<UInt64>();
|
||||
String comparator = literal_id > func_id ? function.name : swap_relations.at(function.name);
|
||||
|
||||
const auto * func = function.arguments->children[func_id]->as<ASTFunction>();
|
||||
const auto * column_id = func->arguments->children.at(0)->as<ASTIdentifier>();
|
||||
|
||||
if (!column_id) return false;
|
||||
|
||||
String column = column_id->name();
|
||||
|
||||
const auto new_ast = generateOptimizedDateFilterAST(comparator, func->name, column, compare_to);
|
||||
|
||||
if (!new_ast) return false;
|
||||
|
||||
ast = new_ast;
|
||||
return true;
|
||||
}
|
||||
|
||||
void OptimizeDateFilterInPlaceData::visit(ASTFunction & function, ASTPtr & ast) const
|
||||
{
|
||||
rewritePredicateInPlace(function, ast);
|
||||
}
|
||||
}
|
@ -1,20 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include <Interpreters/InDepthNodeVisitor.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ASTFunction;
|
||||
|
||||
/// Rewrite the predicates in place
|
||||
class OptimizeDateFilterInPlaceData
|
||||
{
|
||||
public:
|
||||
using TypeToVisit = ASTFunction;
|
||||
void visit(ASTFunction & function, ASTPtr & ast) const;
|
||||
};
|
||||
|
||||
using OptimizeDateFilterInPlaceMatcher = OneTypeMatcher<OptimizeDateFilterInPlaceData>;
|
||||
using OptimizeDateFilterInPlaceVisitor = InDepthNodeVisitor<OptimizeDateFilterInPlaceMatcher, true>;
|
||||
}
|
@ -25,7 +25,6 @@
|
||||
#include <Interpreters/GatherFunctionQuantileVisitor.h>
|
||||
#include <Interpreters/RewriteSumIfFunctionVisitor.h>
|
||||
#include <Interpreters/RewriteArrayExistsFunctionVisitor.h>
|
||||
#include <Interpreters/OptimizeDateFilterVisitor.h>
|
||||
|
||||
#include <Parsers/ASTExpressionList.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
@ -678,21 +677,6 @@ void optimizeInjectiveFunctionsInsideUniq(ASTPtr & query, ContextPtr context)
|
||||
RemoveInjectiveFunctionsVisitor(data).visit(query);
|
||||
}
|
||||
|
||||
void optimizeDateFilters(ASTSelectQuery * select_query)
|
||||
{
|
||||
/// Predicates in HAVING clause has been moved to WHERE clause.
|
||||
if (select_query->where())
|
||||
{
|
||||
OptimizeDateFilterInPlaceVisitor::Data data;
|
||||
OptimizeDateFilterInPlaceVisitor(data).visit(select_query->refWhere());
|
||||
}
|
||||
if (select_query->prewhere())
|
||||
{
|
||||
OptimizeDateFilterInPlaceVisitor::Data data;
|
||||
OptimizeDateFilterInPlaceVisitor(data).visit(select_query->refPrewhere());
|
||||
}
|
||||
}
|
||||
|
||||
void transformIfStringsIntoEnum(ASTPtr & query)
|
||||
{
|
||||
std::unordered_set<String> function_names = {"if", "transform"};
|
||||
@ -796,9 +780,6 @@ void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result,
|
||||
tables_with_columns, result.storage_snapshot->metadata, result.storage);
|
||||
}
|
||||
|
||||
/// Rewrite date filters to avoid the calls of converters such as toYear, toYYYYMM, toISOWeek, etc.
|
||||
optimizeDateFilters(select_query);
|
||||
|
||||
/// GROUP BY injective function elimination.
|
||||
optimizeGroupBy(select_query, context);
|
||||
|
||||
|
@ -534,7 +534,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
|
||||
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
|
||||
{
|
||||
bool is_null = from_value.isNull();
|
||||
if (is_null && !to_type.isNullable())
|
||||
if (is_null && !to_type.isNullable() && !to_type.isLowCardinalityNullable())
|
||||
throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName());
|
||||
|
||||
Field converted = convertFieldToType(from_value, to_type, from_type_hint);
|
||||
|
@ -44,6 +44,9 @@ ThreadPoolCallbackRunner<Result, Callback> threadPoolCallbackRunner(ThreadPool &
|
||||
|
||||
auto future = task->get_future();
|
||||
|
||||
/// ThreadPool is using "bigger is higher priority" instead of "smaller is more priority".
|
||||
/// Note: calling method scheduleOrThrowOnError in intentional, because we don't want to throw exceptions
|
||||
/// in critical places where this callback runner is used (e.g. loading or deletion of parts)
|
||||
my_pool->scheduleOrThrowOnError([my_task = std::move(task)]{ (*my_task)(); }, priority);
|
||||
|
||||
return future;
|
||||
|
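The callback runner used above packages a callback into a task, hands it to the pool, and gives the caller a future. A simplified standalone analogue of that pattern is shown below; a detached std::thread stands in for ThreadPool::scheduleOrThrowOnError, the priority argument is dropped, and the function name is illustrative.

#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>
#include <thread>

template <typename Result>
std::future<Result> scheduleOnPool(std::function<Result()> callback, const std::string & thread_name)
{
    auto task = std::make_shared<std::packaged_task<Result()>>(std::move(callback));
    auto future = task->get_future();
    std::thread([task, thread_name]
    {
        // A real pool would also set the thread name and propagate the query context here,
        // and would be waited on at shutdown instead of being detached.
        (*task)();
    }).detach();
    return future;
}

int main()
{
    auto future = scheduleOnPool<int>([] { return 6 * 7; }, "ParallelRead");
    std::cout << future.get() << '\n';   // blocks until the scheduled callback finishes
}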
@ -170,7 +170,7 @@ bool applyTrivialCountIfPossible(
|
||||
QueryPlan & query_plan,
|
||||
const TableNode & table_node,
|
||||
const QueryTreeNodePtr & query_tree,
|
||||
const ContextPtr & query_context,
|
||||
ContextMutablePtr & query_context,
|
||||
const Names & columns_names)
|
||||
{
|
||||
const auto & settings = query_context->getSettingsRef();
|
||||
@ -208,8 +208,7 @@ bool applyTrivialCountIfPossible(
|
||||
if (storage->hasLightweightDeletedMask())
|
||||
return false;
|
||||
|
||||
if (settings.max_parallel_replicas > 1 ||
|
||||
settings.allow_experimental_query_deduplication
|
||||
if (settings.allow_experimental_query_deduplication
|
||||
|| settings.empty_result_for_aggregation_by_empty_set)
|
||||
return false;
|
||||
|
||||
@ -228,6 +227,18 @@ bool applyTrivialCountIfPossible(
|
||||
if (!num_rows)
|
||||
return false;
|
||||
|
||||
if (settings.max_parallel_replicas > 1)
|
||||
{
|
||||
if (!settings.parallel_replicas_custom_key.value.empty() || settings.allow_experimental_parallel_reading_from_replicas == 0)
|
||||
return false;
|
||||
|
||||
/// The query could use trivial count if it didn't use parallel replicas, so let's disable it
|
||||
query_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0));
|
||||
query_context->setSetting("max_parallel_replicas", UInt64{0});
|
||||
LOG_TRACE(&Poco::Logger::get("Planner"), "Disabling parallel replicas to be able to use a trivial count optimization");
|
||||
|
||||
}
|
||||
|
||||
/// Set aggregation state
|
||||
const AggregateFunctionCount & agg_count = *count_func;
|
||||
std::vector<char> state(agg_count.sizeOfData());
|
||||
@ -619,7 +630,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
|
||||
is_single_table_expression &&
|
||||
table_node &&
|
||||
select_query_info.has_aggregates &&
|
||||
applyTrivialCountIfPossible(query_plan, *table_node, select_query_info.query_tree, planner_context->getQueryContext(), table_expression_data.getColumnNames());
|
||||
applyTrivialCountIfPossible(query_plan, *table_node, select_query_info.query_tree, planner_context->getMutableQueryContext(), table_expression_data.getColumnNames());
|
||||
|
||||
if (is_trivial_count_applied)
|
||||
{
|
||||
|
@ -176,13 +176,16 @@ static AvroDeserializer::DeserializeFn createDecimalDeserializeFn(const avro::No
{
static constexpr size_t field_type_size = sizeof(typename DecimalType::FieldType);
decoder.decodeString(tmp);
if (tmp.size() != field_type_size)
if (tmp.size() > field_type_size)
throw ParsingException(
ErrorCodes::CANNOT_PARSE_UUID,
"Cannot parse type {}, expected binary data with size {}, got {}",
"Cannot parse type {}, expected binary data with size equal to or less than {}, got {}",
target_type->getName(),
field_type_size,
tmp.size());
else if (tmp.size() != field_type_size)
/// Add padding with 0-bytes.
tmp = std::string(field_type_size - tmp.size(), '\0') + tmp;

typename DecimalType::FieldType field;
ReadBufferFromString buf(tmp);

@ -256,8 +259,7 @@ AvroDeserializer::DeserializeFn AvroDeserializer::createDeserializeFn(const avro
if (tmp.length() != 36)
throw ParsingException(ErrorCodes::CANNOT_PARSE_UUID, "Cannot parse uuid {}", tmp);

UUID uuid;
parseUUID(reinterpret_cast<const UInt8 *>(tmp.data()), std::reverse_iterator<UInt8 *>(reinterpret_cast<UInt8 *>(&uuid) + 16));
const UUID uuid = parseUUID({reinterpret_cast<const UInt8 *>(tmp.data()), tmp.length()});
assert_cast<DataTypeUUID::ColumnType &>(column).insertValue(uuid);
return true;
};

@ -329,9 +329,8 @@ AvroSerializer::SchemaWithSerializeFn AvroSerializer::createSchemaWithSerializeF
return {schema, [](const IColumn & column, size_t row_num, avro::Encoder & encoder)
{
const auto & uuid = assert_cast<const DataTypeUUID::ColumnType &>(column).getElement(row_num);
std::array<UInt8, 36> s;
formatUUID(std::reverse_iterator<const UInt8 *>(reinterpret_cast<const UInt8 *>(&uuid) + 16), s.data());
encoder.encodeBytes(reinterpret_cast<const uint8_t *>(s.data()), s.size());
const auto serialized_uuid = formatUUID(uuid);
encoder.encodeBytes(reinterpret_cast<const uint8_t *>(serialized_uuid.data()), serialized_uuid.size());
}};
}
case TypeIndex::Array:
@ -202,6 +202,13 @@ bool DataPartStorageOnDiskBase::isStoredOnRemoteDisk() const
return volume->getDisk()->isRemote();
}

std::optional<String> DataPartStorageOnDiskBase::getCacheName() const
{
if (volume->getDisk()->supportsCache())
return volume->getDisk()->getCacheName();
return std::nullopt;
}

bool DataPartStorageOnDiskBase::supportZeroCopyReplication() const
{
return volume->getDisk()->supportZeroCopyReplication();

@ -36,6 +36,7 @@ public:
std::string getDiskName() const override;
std::string getDiskType() const override;
bool isStoredOnRemoteDisk() const override;
std::optional<String> getCacheName() const override;
bool supportZeroCopyReplication() const override;
bool supportParallelWrite() const override;
bool isBroken() const override;

@ -149,6 +149,7 @@ public:
virtual std::string getDiskName() const = 0;
virtual std::string getDiskType() const = 0;
virtual bool isStoredOnRemoteDisk() const { return false; }
virtual std::optional<String> getCacheName() const { return std::nullopt; }
virtual bool supportZeroCopyReplication() const { return false; }
virtual bool supportParallelWrite() const = 0;
virtual bool isBroken() const = 0;
@ -130,10 +130,6 @@ namespace ProfileEvents
|
||||
namespace CurrentMetrics
|
||||
{
|
||||
extern const Metric DelayedInserts;
|
||||
extern const Metric MergeTreePartsLoaderThreads;
|
||||
extern const Metric MergeTreePartsLoaderThreadsActive;
|
||||
extern const Metric MergeTreePartsCleanerThreads;
|
||||
extern const Metric MergeTreePartsCleanerThreadsActive;
|
||||
}
|
||||
|
||||
|
||||
@ -1425,71 +1421,17 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPartWithRetries(
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
std::vector<MergeTreeData::LoadPartResult> MergeTreeData::loadDataPartsFromDisk(
|
||||
ThreadPool & pool,
|
||||
size_t num_parts,
|
||||
std::queue<PartLoadingTreeNodes> & parts_queue,
|
||||
const MergeTreeSettingsPtr & settings)
|
||||
std::vector<MergeTreeData::LoadPartResult> MergeTreeData::loadDataPartsFromDisk(PartLoadingTreeNodes & parts_to_load)
|
||||
{
|
||||
/// Parallel loading of data parts.
|
||||
pool.setMaxThreads(std::min(static_cast<size_t>(settings->max_part_loading_threads), num_parts));
|
||||
size_t num_threads = pool.getMaxThreads();
|
||||
LOG_DEBUG(log, "Going to use {} threads to load parts", num_threads);
|
||||
const size_t num_parts = parts_to_load.size();
|
||||
|
||||
std::vector<size_t> parts_per_thread(num_threads, num_parts / num_threads);
|
||||
for (size_t i = 0ul; i < num_parts % num_threads; ++i)
|
||||
++parts_per_thread[i];
|
||||
LOG_DEBUG(log, "Will load {} number of parts using {} threads", num_parts, getActivePartsLoadingThreadPool().get().getMaxThreads());
|
||||
|
||||
/// Prepare data parts for parallel loading. Threads will focus on given disk first, then steal
|
||||
/// others' tasks when finish current disk part loading process.
|
||||
std::vector<PartLoadingTreeNodes> threads_parts(num_threads);
|
||||
std::set<size_t> remaining_thread_parts;
|
||||
std::queue<size_t> threads_queue;
|
||||
/// Shuffle all the parts randomly to possible speed up loading them from JBOD.
|
||||
std::shuffle(parts_to_load.begin(), parts_to_load.end(), thread_local_rng);
|
||||
|
||||
for (size_t i = 0; i < num_threads; ++i)
|
||||
{
|
||||
remaining_thread_parts.insert(i);
|
||||
threads_queue.push(i);
|
||||
}
|
||||
|
||||
while (!parts_queue.empty())
|
||||
{
|
||||
assert(!threads_queue.empty());
|
||||
size_t i = threads_queue.front();
|
||||
auto & need_parts = parts_per_thread[i];
|
||||
assert(need_parts > 0);
|
||||
|
||||
auto & thread_parts = threads_parts[i];
|
||||
auto & current_parts = parts_queue.front();
|
||||
assert(!current_parts.empty());
|
||||
|
||||
auto parts_to_grab = std::min(need_parts, current_parts.size());
|
||||
thread_parts.insert(thread_parts.end(), current_parts.end() - parts_to_grab, current_parts.end());
|
||||
current_parts.resize(current_parts.size() - parts_to_grab);
|
||||
need_parts -= parts_to_grab;
|
||||
|
||||
/// Before processing next thread, change disk if possible.
|
||||
/// Different threads will likely start loading parts from different disk,
|
||||
/// which may improve read parallelism for JBOD.
|
||||
|
||||
/// If current disk still has some parts, push it to the tail.
|
||||
if (!current_parts.empty())
|
||||
parts_queue.push(std::move(current_parts));
|
||||
|
||||
parts_queue.pop();
|
||||
|
||||
/// If current thread still want some parts, push it to the tail.
|
||||
if (need_parts > 0)
|
||||
threads_queue.push(i);
|
||||
|
||||
threads_queue.pop();
|
||||
}
|
||||
|
||||
assert(threads_queue.empty());
|
||||
assert(std::all_of(threads_parts.begin(), threads_parts.end(), [](const auto & parts)
|
||||
{
|
||||
return !parts.empty();
|
||||
}));
|
||||
auto runner = threadPoolCallbackRunner<void>(getActivePartsLoadingThreadPool().get(), "ActiveParts");
|
||||
std::vector<std::future<void>> parts_futures;
|
||||
|
||||
std::mutex part_select_mutex;
|
||||
std::mutex part_loading_mutex;
|
||||
@ -1498,81 +1440,77 @@ std::vector<MergeTreeData::LoadPartResult> MergeTreeData::loadDataPartsFromDisk(
|
||||
|
||||
try
|
||||
{
|
||||
for (size_t thread = 0; thread < num_threads; ++thread)
|
||||
while (true)
|
||||
{
|
||||
pool.scheduleOrThrowOnError([&, thread, thread_group = CurrentThread::getGroup()]
|
||||
bool are_parts_to_load_empty = false;
|
||||
{
|
||||
SCOPE_EXIT_SAFE(
|
||||
if (thread_group)
|
||||
CurrentThread::detachFromGroupIfNotDetached();
|
||||
);
|
||||
if (thread_group)
|
||||
CurrentThread::attachToGroupIfDetached(thread_group);
|
||||
std::lock_guard lock(part_select_mutex);
|
||||
are_parts_to_load_empty = parts_to_load.empty();
|
||||
}
|
||||
|
||||
while (true)
|
||||
if (are_parts_to_load_empty)
|
||||
{
|
||||
/// Wait for all scheduled tasks.
|
||||
/// We have to use .get() method to rethrow any exception that could occur.
|
||||
for (auto & future: parts_futures)
|
||||
future.get();
|
||||
parts_futures.clear();
|
||||
/// At this point it is possible, that some other parts appeared in the queue for processing (parts_to_load),
|
||||
/// because we added them from inside the pool.
|
||||
/// So we need to recheck it.
|
||||
}
|
||||
|
||||
PartLoadingTree::NodePtr current_part;
|
||||
{
|
||||
std::lock_guard lock(part_select_mutex);
|
||||
if (parts_to_load.empty())
|
||||
break;
|
||||
|
||||
current_part = parts_to_load.back();
|
||||
parts_to_load.pop_back();
|
||||
}
|
||||
|
||||
parts_futures.push_back(runner(
|
||||
[&, part = std::move(current_part)]()
|
||||
{
|
||||
PartLoadingTree::NodePtr thread_part;
|
||||
size_t thread_idx = thread;
|
||||
|
||||
{
|
||||
std::lock_guard lock{part_select_mutex};
|
||||
|
||||
if (remaining_thread_parts.empty())
|
||||
return;
|
||||
|
||||
/// Steal task if nothing to do
|
||||
if (threads_parts[thread].empty())
|
||||
{
|
||||
// Try random steal tasks from the next thread
|
||||
std::uniform_int_distribution<size_t> distribution(0, remaining_thread_parts.size() - 1);
|
||||
auto it = remaining_thread_parts.begin();
|
||||
std::advance(it, distribution(thread_local_rng));
|
||||
thread_idx = *it;
|
||||
}
|
||||
|
||||
auto & thread_parts = threads_parts[thread_idx];
|
||||
thread_part = thread_parts.back();
|
||||
thread_parts.pop_back();
|
||||
if (thread_parts.empty())
|
||||
remaining_thread_parts.erase(thread_idx);
|
||||
}
|
||||
|
||||
/// Pass a separate mutex to guard the set of parts, because this lambda
|
||||
/// is called concurrently but with already locked @data_parts_mutex.
|
||||
auto res = loadDataPartWithRetries(
|
||||
thread_part->info, thread_part->name, thread_part->disk,
|
||||
part->info, part->name, part->disk,
|
||||
DataPartState::Active, part_loading_mutex, loading_parts_initial_backoff_ms,
|
||||
loading_parts_max_backoff_ms, loading_parts_max_tries);
|
||||
|
||||
thread_part->is_loaded = true;
|
||||
part->is_loaded = true;
|
||||
bool is_active_part = res.part->getState() == DataPartState::Active;
|
||||
|
||||
/// If part is broken or duplicate or should be removed according to transaction
|
||||
/// and it has any covered parts then try to load them to replace this part.
|
||||
if (!is_active_part && !thread_part->children.empty())
|
||||
if (!is_active_part && !part->children.empty())
|
||||
{
|
||||
std::lock_guard lock{part_select_mutex};
|
||||
for (const auto & [_, node] : thread_part->children)
|
||||
threads_parts[thread].push_back(node);
|
||||
remaining_thread_parts.insert(thread);
|
||||
for (const auto & [_, node] : part->children)
|
||||
parts_to_load.push_back(node);
|
||||
}
|
||||
|
||||
{
|
||||
std::lock_guard lock(part_loading_mutex);
|
||||
loaded_parts.push_back(std::move(res));
|
||||
}
|
||||
}
|
||||
});
|
||||
}, Priority{0}));
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// If this is not done, then in case of an exception, tasks will be destroyed before the threads are completed, and it will be bad.
|
||||
pool.wait();
|
||||
/// Wait for all scheduled tasks
|
||||
/// A future becomes invalid after .get() call
|
||||
/// + .wait() method is used not to throw any exception here.
|
||||
for (auto & future: parts_futures)
|
||||
if (future.valid())
|
||||
future.wait();
|
||||
|
||||
throw;
|
||||
}
|
||||
|
||||
pool.wait();
|
||||
return loaded_parts;
|
||||
}
|
||||
|
||||
@ -1679,9 +1617,12 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
}
|
||||
}
|
||||
|
||||
ThreadPool pool(CurrentMetrics::MergeTreePartsLoaderThreads, CurrentMetrics::MergeTreePartsLoaderThreadsActive, disks.size());
|
||||
auto runner = threadPoolCallbackRunner<void>(getActivePartsLoadingThreadPool().get(), "ActiveParts");
|
||||
std::vector<PartLoadingTree::PartLoadingInfos> parts_to_load_by_disk(disks.size());
|
||||
|
||||
std::vector<std::future<void>> disks_futures;
|
||||
disks_futures.reserve(disks.size());
|
||||
|
||||
for (size_t i = 0; i < disks.size(); ++i)
|
||||
{
|
||||
const auto & disk_ptr = disks[i];
|
||||
@ -1690,7 +1631,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
|
||||
auto & disk_parts = parts_to_load_by_disk[i];
|
||||
|
||||
pool.scheduleOrThrowOnError([&, disk_ptr]()
|
||||
disks_futures.push_back(runner([&, disk_ptr]()
|
||||
{
|
||||
for (auto it = disk_ptr->iterateDirectory(relative_data_path); it->isValid(); it->next())
|
||||
{
|
||||
@ -1703,38 +1644,31 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
if (auto part_info = MergeTreePartInfo::tryParsePartName(it->name(), format_version))
|
||||
disk_parts.emplace_back(*part_info, it->name(), disk_ptr);
|
||||
}
|
||||
});
|
||||
}, Priority{0}));
|
||||
}
|
||||
|
||||
pool.wait();
|
||||
/// For iteration to be completed
|
||||
/// Any exception will be re-thrown.
|
||||
for (auto & future : disks_futures)
|
||||
future.get();
|
||||
disks_futures.clear();
|
||||
|
||||
PartLoadingTree::PartLoadingInfos parts_to_load;
|
||||
for (auto & disk_parts : parts_to_load_by_disk)
|
||||
std::move(disk_parts.begin(), disk_parts.end(), std::back_inserter(parts_to_load));
|
||||
|
||||
auto loading_tree = PartLoadingTree::build(std::move(parts_to_load));
|
||||
/// Collect parts by disks' names.
|
||||
std::map<String, PartLoadingTreeNodes> disk_part_map;
|
||||
|
||||
size_t num_parts = 0;
|
||||
PartLoadingTreeNodes active_parts;
|
||||
|
||||
/// Collect only "the most covering" parts from the top level of the tree.
|
||||
loading_tree.traverse(/*recursive=*/ false, [&](const auto & node)
|
||||
{
|
||||
disk_part_map[node->disk->getName()].emplace_back(node);
|
||||
active_parts.emplace_back(node);
|
||||
});
|
||||
|
||||
size_t num_parts = 0;
|
||||
std::queue<PartLoadingTreeNodes> parts_queue;
|
||||
|
||||
for (auto & [disk_name, disk_parts] : disk_part_map)
|
||||
{
|
||||
LOG_INFO(log, "Found {} parts for disk '{}' to load", disk_parts.size(), disk_name);
|
||||
|
||||
if (disk_parts.empty())
|
||||
continue;
|
||||
|
||||
num_parts += disk_parts.size();
|
||||
parts_queue.push(std::move(disk_parts));
|
||||
}
|
||||
num_parts += active_parts.size();
|
||||
|
||||
auto part_lock = lockParts();
|
||||
LOG_TEST(log, "loadDataParts: clearing data_parts_indexes (had {} parts)", data_parts_indexes.size());
|
||||
@ -1754,7 +1688,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
|
||||
if (num_parts > 0)
|
||||
{
|
||||
auto loaded_parts = loadDataPartsFromDisk(pool, num_parts, parts_queue, settings);
|
||||
auto loaded_parts = loadDataPartsFromDisk(active_parts);
|
||||
|
||||
for (const auto & res : loaded_parts)
|
||||
{
|
||||
@ -1783,10 +1717,12 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
|
||||
if (settings->in_memory_parts_enable_wal)
|
||||
{
|
||||
pool.setMaxThreads(disks.size());
|
||||
std::vector<MutableDataPartsVector> disks_wal_parts(disks.size());
|
||||
std::mutex wal_init_lock;
|
||||
|
||||
std::vector<std::future<void>> wal_disks_futures;
|
||||
wal_disks_futures.reserve(disks.size());
|
||||
|
||||
for (size_t i = 0; i < disks.size(); ++i)
|
||||
{
|
||||
const auto & disk_ptr = disks[i];
|
||||
@ -1795,7 +1731,7 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
|
||||
auto & disk_wal_parts = disks_wal_parts[i];
|
||||
|
||||
pool.scheduleOrThrowOnError([&, disk_ptr]()
|
||||
wal_disks_futures.push_back(runner([&, disk_ptr]()
|
||||
{
|
||||
for (auto it = disk_ptr->iterateDirectory(relative_data_path); it->isValid(); it->next())
|
||||
{
|
||||
@ -1821,10 +1757,14 @@ void MergeTreeData::loadDataParts(bool skip_sanity_checks)
|
||||
disk_wal_parts.push_back(std::move(part));
|
||||
}
|
||||
}
|
||||
});
|
||||
}, Priority{0}));
|
||||
}
|
||||
|
||||
pool.wait();
|
||||
/// For for iteration to be completed
|
||||
/// Any exception will be re-thrown.
|
||||
for (auto & future : wal_disks_futures)
|
||||
future.get();
|
||||
wal_disks_futures.clear();
|
||||
|
||||
MutableDataPartsVector parts_from_wal;
|
||||
for (auto & disk_wal_parts : disks_wal_parts)
|
||||
@ -1925,7 +1865,7 @@ try
|
||||
|
||||
std::atomic_size_t num_loaded_parts = 0;
|
||||
|
||||
auto runner = threadPoolCallbackRunner<void>(OutdatedPartsLoadingThreadPool::get(), "OutdatedParts");
|
||||
auto runner = threadPoolCallbackRunner<void>(getOutdatedPartsLoadingThreadPool().get(), "OutdatedParts");
|
||||
std::vector<std::future<void>> parts_futures;
|
||||
|
||||
while (true)
|
||||
@ -1938,8 +1878,10 @@ try
|
||||
if (is_async && outdated_data_parts_loading_canceled)
|
||||
{
|
||||
/// Wait for every scheduled task
|
||||
/// In case of any exception it will be re-thrown and server will be terminated.
|
||||
for (auto & future : parts_futures)
|
||||
future.wait();
|
||||
future.get();
|
||||
parts_futures.clear();
|
||||
|
||||
LOG_DEBUG(log,
|
||||
"Stopped loading outdated data parts because task was canceled. "
|
||||
@ -1973,7 +1915,7 @@ try
|
||||
|
||||
/// Wait for every scheduled task
|
||||
for (auto & future : parts_futures)
|
||||
future.wait();
|
||||
future.get();
|
||||
|
||||
LOG_DEBUG(log, "Loaded {} outdated data parts {}",
|
||||
num_loaded_parts, is_async ? "asynchronously" : "synchronously");
|
||||
@ -1999,6 +1941,13 @@ void MergeTreeData::waitForOutdatedPartsToBeLoaded() const TSA_NO_THREAD_SAFETY_
|
||||
if (isStaticStorage())
|
||||
return;
|
||||
|
||||
/// We need to load parts as fast as possible
|
||||
getOutdatedPartsLoadingThreadPool().enableTurboMode();
|
||||
SCOPE_EXIT({
|
||||
/// Let's lower the number of threads e.g. for later ATTACH queries to behave as usual
|
||||
getOutdatedPartsLoadingThreadPool().disableTurboMode();
|
||||
});
|
||||
|
||||
LOG_TRACE(log, "Will wait for outdated data parts to be loaded");
|
||||
|
||||
std::unique_lock lock(outdated_data_parts_mutex);
|
||||
@ -2420,20 +2369,15 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
}
|
||||
};
|
||||
|
||||
if (settings->max_part_removal_threads <= 1 || parts_to_remove.size() <= settings->concurrent_part_removal_threshold)
|
||||
if (parts_to_remove.size() <= settings->concurrent_part_removal_threshold)
|
||||
{
|
||||
remove_single_thread();
|
||||
return;
|
||||
}
|
||||
|
||||
/// Parallel parts removal.
|
||||
size_t num_threads = settings->max_part_removal_threads;
|
||||
if (!num_threads)
|
||||
num_threads = getNumberOfPhysicalCPUCores() * 2;
|
||||
num_threads = std::min<size_t>(num_threads, parts_to_remove.size());
|
||||
std::mutex part_names_mutex;
|
||||
ThreadPool pool(CurrentMetrics::MergeTreePartsCleanerThreads, CurrentMetrics::MergeTreePartsCleanerThreadsActive,
|
||||
num_threads, num_threads, /* unlimited queue size */ 0);
|
||||
auto runner = threadPoolCallbackRunner<void>(getPartsCleaningThreadPool().get(), "PartsCleaning");
|
||||
|
||||
/// This flag disallow straightforward concurrent parts removal. It's required only in case
|
||||
/// when we have parts on zero-copy disk + at least some of them were mutated.
|
||||
@ -2453,27 +2397,27 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
LOG_DEBUG(
|
||||
log, "Removing {} parts from filesystem (concurrently): Parts: [{}]", parts_to_remove.size(), fmt::join(parts_to_remove, ", "));
|
||||
|
||||
std::vector<std::future<void>> parts_to_remove_futures;
|
||||
parts_to_remove_futures.reserve(parts_to_remove.size());
|
||||
|
||||
for (const DataPartPtr & part : parts_to_remove)
|
||||
{
|
||||
pool.scheduleOrThrowOnError([&part, &part_names_mutex, part_names_succeed, thread_group = CurrentThread::getGroup()]
|
||||
parts_to_remove_futures.push_back(runner([&part, &part_names_mutex, part_names_succeed, thread_group = CurrentThread::getGroup()]
|
||||
{
|
||||
SCOPE_EXIT_SAFE(
|
||||
if (thread_group)
|
||||
CurrentThread::detachFromGroupIfNotDetached();
|
||||
);
|
||||
if (thread_group)
|
||||
CurrentThread::attachToGroupIfDetached(thread_group);
|
||||
|
||||
asMutableDeletingPart(part)->remove();
|
||||
if (part_names_succeed)
|
||||
{
|
||||
std::lock_guard lock(part_names_mutex);
|
||||
part_names_succeed->insert(part->name);
|
||||
}
|
||||
});
|
||||
}, Priority{0}));
|
||||
}
|
||||
|
||||
pool.wait();
|
||||
/// Any exception will be re-thrown.
|
||||
for (auto & future : parts_to_remove_futures)
|
||||
future.get();
|
||||
parts_to_remove_futures.clear();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2544,20 +2488,15 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
return independent_ranges;
|
||||
};
|
||||
|
||||
auto schedule_parts_removal = [this, &pool, &part_names_mutex, part_names_succeed](
|
||||
std::vector<std::future<void>> part_removal_futures;
|
||||
|
||||
auto schedule_parts_removal = [this, &runner, &part_names_mutex, part_names_succeed, &part_removal_futures](
|
||||
const MergeTreePartInfo & range, DataPartsVector && parts_in_range)
|
||||
{
|
||||
/// Below, range should be captured by copy to avoid use-after-scope on exception from pool
|
||||
pool.scheduleOrThrowOnError(
|
||||
[this, range, &part_names_mutex, part_names_succeed, thread_group = CurrentThread::getGroup(), batch = std::move(parts_in_range)]
|
||||
part_removal_futures.push_back(runner(
|
||||
[this, range, &part_names_mutex, part_names_succeed, batch = std::move(parts_in_range)]
|
||||
{
|
||||
SCOPE_EXIT_SAFE(
|
||||
if (thread_group)
|
||||
CurrentThread::detachFromGroupIfNotDetached();
|
||||
);
|
||||
if (thread_group)
|
||||
CurrentThread::attachToGroupIfDetached(thread_group);
|
||||
|
||||
LOG_TRACE(log, "Removing {} parts in blocks range {}", batch.size(), range.getPartNameForLogs());
|
||||
|
||||
for (const auto & part : batch)
|
||||
@ -2569,7 +2508,7 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
part_names_succeed->insert(part->name);
|
||||
}
|
||||
}
|
||||
});
|
||||
}, Priority{0}));
|
||||
};
|
||||
|
||||
RemovalRanges independent_ranges = split_into_independent_ranges(parts_to_remove, /* split_times */ 0);
|
||||
@ -2632,7 +2571,11 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
LOG_TRACE(log, "Will remove {} big parts separately: {}", excluded_parts.size(), fmt::join(excluded_parts, ", "));
|
||||
|
||||
independent_ranges = split_into_independent_ranges(excluded_parts, /* split_times */ 0);
|
||||
pool.wait();
|
||||
|
||||
/// Any exception will be re-thrown.
|
||||
for (auto & future : part_removal_futures)
|
||||
future.get();
|
||||
part_removal_futures.clear();
|
||||
|
||||
for (size_t i = 0; i < independent_ranges.infos.size(); ++i)
|
||||
{
|
||||
@ -2641,7 +2584,10 @@ void MergeTreeData::clearPartsFromFilesystemImpl(const DataPartsVector & parts_t
|
||||
schedule_parts_removal(range, std::move(parts_in_range));
|
||||
}
|
||||
|
||||
pool.wait();
|
||||
/// Any exception will be re-thrown.
|
||||
for (auto & future : part_removal_futures)
|
||||
future.get();
|
||||
part_removal_futures.clear();
|
||||
|
||||
if (parts_to_remove.size() != sum_of_ranges + excluded_parts.size())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
|
@ -1519,11 +1519,7 @@ private:
size_t max_backoff_ms,
size_t max_tries);

std::vector<LoadPartResult> loadDataPartsFromDisk(
ThreadPool & pool,
size_t num_parts,
std::queue<PartLoadingTreeNodes> & parts_queue,
const MergeTreeSettingsPtr & settings);
std::vector<LoadPartResult> loadDataPartsFromDisk(PartLoadingTreeNodes & parts_to_load);

void loadDataPartsFromWAL(MutableDataPartsVector & parts_from_wal);

@ -143,8 +143,6 @@ struct Settings;
M(Bool, ttl_only_drop_parts, false, "Only drop altogether the expired parts and not partially prune them.", 0) \
M(Bool, materialize_ttl_recalculate_only, false, "Only recalculate ttl info when MATERIALIZE TTL", 0) \
M(Bool, enable_mixed_granularity_parts, true, "Enable parts with adaptive and non adaptive granularity", 0) \
M(MaxThreads, max_part_loading_threads, 0, "The number of threads to load data parts at startup.", 0) \
M(MaxThreads, max_part_removal_threads, 0, "The number of threads for concurrent removal of inactive data parts. One is usually enough, but in 'Google Compute Environment SSD Persistent Disks' file removal (unlink) operation is extraordinarily slow and you probably have to increase this number (recommended is up to 16).", 0) \
M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \
M(UInt64, zero_copy_concurrent_part_removal_max_split_times, 5, "Max recursion depth for splitting independent Outdated parts ranges into smaller subranges (highly not recommended to change)", 0) \
M(Float, zero_copy_concurrent_part_removal_max_postpone_ratio, static_cast<Float32>(0.05), "Max percentage of top level parts to postpone removal in order to get smaller independent ranges (highly not recommended to change)", 0) \

@ -192,6 +190,9 @@ struct Settings;
M(UInt64, write_ahead_log_bytes_to_fsync, 100ULL * 1024 * 1024, "Obsolete setting, does nothing.", 0) \
M(UInt64, write_ahead_log_interval_ms_to_fsync, 100, "Obsolete setting, does nothing.", 0) \
M(Bool, in_memory_parts_insert_sync, false, "Obsolete setting, does nothing.", 0) \
M(MaxThreads, max_part_loading_threads, 0, "Obsolete setting, does nothing.", 0) \
M(MaxThreads, max_part_removal_threads, 0, "Obsolete setting, does nothing.", 0) \

/// Settings that should not change after the creation of a table.
/// NOLINTNEXTLINE
#define APPLY_FOR_IMMUTABLE_MERGE_TREE_SETTINGS(M) \

@ -105,7 +105,7 @@ struct MergeTreeSource::AsyncReadingState
AsyncReadingState()
{
control = std::make_shared<Control>();
callback_runner = threadPoolCallbackRunner<void>(IOThreadPool::get(), "MergeTreeRead");
callback_runner = threadPoolCallbackRunner<void>(getIOThreadPool().get(), "MergeTreeRead");
}

~AsyncReadingState()

@ -766,7 +766,7 @@ public:
DBMS_DEFAULT_BUFFER_SIZE,
configuration_.request_settings,
std::nullopt,
threadPoolCallbackRunner<void>(IOThreadPool::get(), "S3ParallelWrite"),
threadPoolCallbackRunner<void>(getIOThreadPool().get(), "S3ParallelWrite"),
context->getWriteSettings()),
compression_method,
3);

@ -194,7 +194,7 @@ private:
futures.push_back(
scheduleFromThreadPool<void>(
std::move(worker),
IOThreadPool::get(),
getIOThreadPool().get(),
"DP_BytesOnDisk"));
}
57
src/Storages/System/StorageSystemUserProcesses.cpp
Normal file
@ -0,0 +1,57 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnsNumber.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Interpreters/Context.h>
#include <Interpreters/ProcessList.h>
#include <Interpreters/ProfileEventsExt.h>
#include <Storages/System/StorageSystemUserProcesses.h>


namespace DB
{

NamesAndTypesList StorageSystemUserProcesses::getNamesAndTypes()
{
return {
{"user", std::make_shared<DataTypeString>()},
{"memory_usage", std::make_shared<DataTypeInt64>()},
{"peak_memory_usage", std::make_shared<DataTypeInt64>()},
{"ProfileEvents", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt64>())},
};
}

NamesAndAliases StorageSystemUserProcesses::getNamesAndAliases()
{
return {
{"ProfileEvents.Names", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>())}, "mapKeys(ProfileEvents)"},
{"ProfileEvents.Values", {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>())}, "mapValues(ProfileEvents)"}};
}

void StorageSystemUserProcesses::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const
{
const auto user_info = context->getProcessList().getUserInfo(true);

for (const auto & [user, info] : user_info)
{
size_t i = 0;

res_columns[i++]->insert(user);
res_columns[i++]->insert(info.memory_usage);
res_columns[i++]->insert(info.peak_memory_usage);
{
IColumn * column = res_columns[i++].get();

if (info.profile_counters)
ProfileEvents::dumpToMapColumn(*info.profile_counters, column, true);
else
{
column->insertDefault();
}
}
}
}
}
29
src/Storages/System/StorageSystemUserProcesses.h
Normal file
@ -0,0 +1,29 @@
#pragma once

#include <Storages/System/IStorageSystemOneBlock.h>


namespace DB
{

class Context;


/** Implements `processes` system table, which allows you to get information about the queries that are currently executing.
*/
class StorageSystemUserProcesses final : public IStorageSystemOneBlock<StorageSystemUserProcesses>
{
public:
std::string getName() const override { return "SystemUserProcesses"; }

static NamesAndTypesList getNamesAndTypes();

static NamesAndAliases getNamesAndAliases();

protected:
using IStorageSystemOneBlock::IStorageSystemOneBlock;

void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const override;
};

}
@ -36,6 +36,7 @@
#include <Storages/System/StorageSystemPartsColumns.h>
#include <Storages/System/StorageSystemProjectionPartsColumns.h>
#include <Storages/System/StorageSystemProcesses.h>
#include <Storages/System/StorageSystemUserProcesses.h>
#include <Storages/System/StorageSystemReplicas.h>
#include <Storages/System/StorageSystemReplicationQueue.h>
#include <Storages/System/StorageSystemDistributionQueue.h>

@ -185,6 +186,7 @@ void attachSystemTablesServer(ContextPtr context, IDatabase & system_database, b
attach<StorageSystemRemoteDataPaths>(context, system_database, "remote_data_paths");
attach<StorageSystemCertificates>(context, system_database, "certificates");
attach<StorageSystemNamedCollections>(context, system_database, "named_collections");
attach<StorageSystemUserProcesses>(context, system_database, "user_processes");

if (has_zookeeper)
{

@ -60,6 +60,15 @@ ColumnsDescription getStructureOfRemoteTableInShard(
ColumnsDescription res;
auto new_context = ClusterProxy::updateSettingsForCluster(cluster, context, context->getSettingsRef(), table_id);

/// Ignore limit for result number of rows (that could be set during handling CSE/CTE),
/// since this is a service query and should not lead to query failure.
{
Settings new_settings = new_context->getSettings();
new_settings.max_result_rows = 0;
new_settings.max_result_bytes = 0;
new_context->setSettings(new_settings);
}

/// Expect only needed columns from the result of DESC TABLE. NOTE 'comment' column is ignored for compatibility reasons.
Block sample_block
{
@ -2,30 +2,27 @@

"""The lambda to decrease/increase ASG desired capacity based on current queue"""

import json
import logging
import time
from dataclasses import dataclass
from pprint import pformat
from typing import Any, List, Literal, Optional, Tuple

import boto3 # type: ignore
import requests # type: ignore

RUNNER_TYPE_LABELS = [
"builder",
"func-tester",
"func-tester-aarch64",
"fuzzer-unit-tester",
"stress-tester",
"style-checker",
"style-checker-aarch64",
]
from lambda_shared import (
CHException,
ClickHouseHelper,
RUNNER_TYPE_LABELS,
get_parameter_from_ssm,
)

### Update comment on the change ###
# 4 HOUR - is a balance to get the most precise values
# - Our longest possible running check is around 5h on the worst scenario
# - The long queue won't be wiped out and replaced, so the measurmenet is fine
# - If the data is spoiled by something, we are from the bills perspective
# Changed it to 3 HOUR: in average we have 1h tasks, but p90 is around 2h.
# With 4h we have too much wasted computing time in case of issues with DB
QUEUE_QUERY = f"""SELECT
last_status AS status,
toUInt32(count()) AS length,

@ -40,7 +37,7 @@ FROM
FROM default.workflow_jobs
WHERE has(labels, 'self-hosted')
AND hasAny({RUNNER_TYPE_LABELS}, labels)
AND started_at > now() - INTERVAL 4 HOUR
AND started_at > now() - INTERVAL 3 HOUR
GROUP BY ALL
HAVING last_status IN ('in_progress', 'queued')
)

@ -68,64 +65,14 @@ def get_scales(runner_type: str) -> Tuple[int, int]:
# 10. I am trying 7 now.
# UPDATE THE COMMENT ON CHANGES
scale_up = 7
elif runner_type == "limited-tester":
# The limited runners should inflate and deflate faster
scale_down = 1
scale_up = 2
return scale_down, scale_up


### VENDORING
def get_parameter_from_ssm(name, decrypt=True, client=None):
if not client:
client = boto3.client("ssm", region_name="us-east-1")
return client.get_parameter(Name=name, WithDecryption=decrypt)["Parameter"]["Value"]


class CHException(Exception):
pass


class ClickHouseHelper:
def __init__(
self,
url: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
):
self.url = url
self.auth = {}
if user:
self.auth["X-ClickHouse-User"] = user
if password:
self.auth["X-ClickHouse-Key"] = password

def _select_and_get_json_each_row(self, db, query):
params = {
"database": db,
"query": query,
"default_format": "JSONEachRow",
}
for i in range(5):
response = None
try:
response = requests.get(self.url, params=params, headers=self.auth)
response.raise_for_status()
return response.text
except Exception as ex:
logging.warning("Cannot fetch data with exception %s", str(ex))
if response:
logging.warning("Reponse text %s", response.text)
time.sleep(0.1 * i)

raise CHException("Cannot fetch data from clickhouse")

def select_json_each_row(self, db, query):
text = self._select_and_get_json_each_row(db, query)
result = []
for line in text.split("\n"):
if line:
result.append(json.loads(line))
return result


CH_CLIENT = ClickHouseHelper(get_parameter_from_ssm("clickhouse-test-stat-url"), "play")
CH_CLIENT = None # type: Optional[ClickHouseHelper]


def set_capacity(

@ -170,7 +117,17 @@ def set_capacity(
# Finally, should the capacity be even changed
stop = stop or asg["DesiredCapacity"] == desired_capacity
if stop:
logging.info(
"Do not increase ASG %s capacity, current capacity=%s, "
"maximum capacity=%s, running jobs=%s, queue size=%s",
asg["AutoScalingGroupName"],
desired_capacity,
asg["MaxSize"],
running,
queued,
)
return

logging.info(
"The ASG %s capacity will be increased to %s, current capacity=%s, "
"maximum capacity=%s, running jobs=%s, queue size=%s",

@ -195,6 +152,15 @@ def set_capacity(
desired_capacity = min(desired_capacity, asg["MaxSize"])
stop = stop or asg["DesiredCapacity"] == desired_capacity
if stop:
logging.info(
"Do not decrease ASG %s capacity, current capacity=%s, "
"minimum capacity=%s, running jobs=%s, queue size=%s",
asg["AutoScalingGroupName"],
desired_capacity,
asg["MinSize"],
running,
queued,
)
return

logging.info(

@ -219,6 +185,9 @@ def main(dry_run: bool = True) -> None:
asg_client = boto3.client("autoscaling")
try:
global CH_CLIENT
CH_CLIENT = CH_CLIENT or ClickHouseHelper(
get_parameter_from_ssm("clickhouse-test-stat-url"), "play"
)
queues = CH_CLIENT.select_json_each_row("default", QUEUE_QUERY)
except CHException as ex:
logging.exception(
@ -4,7 +4,7 @@ import unittest
from dataclasses import dataclass
from typing import Any, List

from autoscale_runners_lambda.app import set_capacity, Queue
from app import set_capacity, Queue


@dataclass

1
tests/ci/autoscale_runners_lambda/lambda_shared
Symbolic link
@ -0,0 +1 @@
../lambda_shared_package/lambda_shared

@ -1 +1 @@
requests<2.30
../lambda_shared_package
@ -9,9 +9,10 @@ import json
|
||||
import re
|
||||
import time
|
||||
|
||||
import jwt
|
||||
import requests # type: ignore
|
||||
import boto3 # type: ignore
|
||||
|
||||
from lambda_shared.pr import CATEGORY_TO_LABEL, check_pr_description
|
||||
from lambda_shared.token import get_cached_access_token
|
||||
|
||||
|
||||
NEED_RERUN_ON_EDITED = {
|
||||
@ -27,123 +28,6 @@ MAX_RETRY = 5
|
||||
|
||||
DEBUG_INFO = {} # type: Dict[str, Any]
|
||||
|
||||
# Descriptions are used in .github/PULL_REQUEST_TEMPLATE.md, keep comments there
|
||||
# updated accordingly
|
||||
# The following lists are append only, try to avoid editing them
|
||||
# They still could be cleaned out after the decent time though.
|
||||
LABELS = {
|
||||
"pr-backward-incompatible": ["Backward Incompatible Change"],
|
||||
"pr-bugfix": [
|
||||
"Bug Fix",
|
||||
"Bug Fix (user-visible misbehavior in an official stable release)",
|
||||
"Bug Fix (user-visible misbehaviour in official stable or prestable release)",
|
||||
"Bug Fix (user-visible misbehavior in official stable or prestable release)",
|
||||
],
|
||||
"pr-build": [
|
||||
"Build/Testing/Packaging Improvement",
|
||||
"Build Improvement",
|
||||
"Build/Testing Improvement",
|
||||
"Build",
|
||||
"Packaging Improvement",
|
||||
],
|
||||
"pr-documentation": [
|
||||
"Documentation (changelog entry is not required)",
|
||||
"Documentation",
|
||||
],
|
||||
"pr-feature": ["New Feature"],
|
||||
"pr-improvement": ["Improvement"],
|
||||
"pr-not-for-changelog": [
|
||||
"Not for changelog (changelog entry is not required)",
|
||||
"Not for changelog",
|
||||
],
|
||||
"pr-performance": ["Performance Improvement"],
|
||||
}
|
||||
|
||||
CATEGORY_TO_LABEL = {c: lb for lb, categories in LABELS.items() for c in categories}
|
||||
|
||||
|
||||
def check_pr_description(pr_body: str) -> Tuple[str, str]:
|
||||
"""The function checks the body to being properly formatted according to
|
||||
.github/PULL_REQUEST_TEMPLATE.md, if the first returned string is not empty,
|
||||
then there is an error."""
|
||||
lines = list(map(lambda x: x.strip(), pr_body.split("\n") if pr_body else []))
|
||||
lines = [re.sub(r"\s+", " ", line) for line in lines]
|
||||
|
||||
# Check if body contains "Reverts ClickHouse/ClickHouse#36337"
|
||||
if [
|
||||
True
|
||||
for line in lines
|
||||
if re.match(r"\AReverts {GITHUB_REPOSITORY}#[\d]+\Z", line)
|
||||
]:
|
||||
return "", LABELS["pr-not-for-changelog"][0]
|
||||
|
||||
category = ""
|
||||
entry = ""
|
||||
description_error = ""
|
||||
|
||||
i = 0
|
||||
while i < len(lines):
|
||||
if re.match(r"(?i)^[#>*_ ]*change\s*log\s*category", lines[i]):
|
||||
i += 1
|
||||
if i >= len(lines):
|
||||
break
|
||||
# Can have one empty line between header and the category
|
||||
# itself. Filter it out.
|
||||
if not lines[i]:
|
||||
i += 1
|
||||
if i >= len(lines):
|
||||
break
|
||||
category = re.sub(r"^[-*\s]*", "", lines[i])
|
||||
i += 1
|
||||
|
||||
# Should not have more than one category. Require empty line
|
||||
# after the first found category.
|
||||
if i >= len(lines):
|
||||
break
|
||||
if lines[i]:
|
||||
second_category = re.sub(r"^[-*\s]*", "", lines[i])
|
||||
description_error = (
|
||||
"More than one changelog category specified: "
|
||||
f"'{category}', '{second_category}'"
|
||||
)
|
||||
return description_error, category
|
||||
|
||||
elif re.match(
|
||||
r"(?i)^[#>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i]
|
||||
):
|
||||
i += 1
|
||||
# Can have one empty line between header and the entry itself.
|
||||
# Filter it out.
|
||||
if i < len(lines) and not lines[i]:
|
||||
i += 1
|
||||
# All following lines until empty one are the changelog entry.
|
||||
entry_lines = []
|
||||
while i < len(lines) and lines[i]:
|
||||
entry_lines.append(lines[i])
|
||||
i += 1
|
||||
entry = " ".join(entry_lines)
|
||||
# Don't accept changelog entries like '...'.
|
||||
entry = re.sub(r"[#>*_.\- ]", "", entry)
|
||||
# Don't accept changelog entries like 'Close #12345'.
|
||||
entry = re.sub(r"^[\w\-\s]{0,10}#?\d{5,6}\.?$", "", entry)
|
||||
else:
|
||||
i += 1
|
||||
|
||||
if not category:
|
||||
description_error = "Changelog category is empty"
|
||||
# Filter out the PR categories that are not for changelog.
|
||||
elif re.match(
|
||||
r"(?i)doc|((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)",
|
||||
category,
|
||||
):
|
||||
pass # to not check the rest of the conditions
|
||||
elif category not in CATEGORY_TO_LABEL:
|
||||
description_error, category = f"Category '{category}' is not valid", ""
|
||||
elif not entry:
|
||||
description_error = f"Changelog entry required for category '{category}'"
|
||||
|
||||
return description_error, category
|
||||
|
||||
|
||||
class Worker(Thread):
|
||||
def __init__(
|
||||
@ -166,58 +50,6 @@ class Worker(Thread):
|
||||
self.queue.task_done()
|
||||
|
||||
|
||||
def get_installation_id(jwt_token):
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.get("https://api.github.com/app/installations", headers=headers)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
for installation in data:
|
||||
if installation["account"]["login"] == "ClickHouse":
|
||||
installation_id = installation["id"]
|
||||
return installation_id
|
||||
|
||||
|
||||
def get_access_token(jwt_token, installation_id):
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.post(
|
||||
f"https://api.github.com/app/installations/{installation_id}/access_tokens",
|
||||
headers=headers,
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
return data["token"]
|
||||
|
||||
|
||||
def get_key_and_app_from_aws():
|
||||
secret_name = "clickhouse_github_secret_key"
|
||||
session = boto3.session.Session()
|
||||
client = session.client(
|
||||
service_name="secretsmanager",
|
||||
)
|
||||
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
|
||||
data = json.loads(get_secret_value_response["SecretString"])
|
||||
return data["clickhouse-app-key"], int(data["clickhouse-app-id"])
|
||||
|
||||
|
||||
def get_token_from_aws():
|
||||
private_key, app_id = get_key_and_app_from_aws()
|
||||
payload = {
|
||||
"iat": int(time.time()) - 60,
|
||||
"exp": int(time.time()) + (10 * 60),
|
||||
"iss": app_id,
|
||||
}
|
||||
|
||||
encoded_jwt = jwt.encode(payload, private_key, algorithm="RS256")
|
||||
installation_id = get_installation_id(encoded_jwt)
|
||||
return get_access_token(encoded_jwt, installation_id)
|
||||
|
||||
|
||||
def _exec_get_with_retry(url: str, token: str) -> dict:
|
||||
headers = {"Authorization": f"token {token}"}
|
||||
for i in range(MAX_RETRY):
|
||||
@ -407,7 +239,7 @@ def exec_workflow_url(urls_to_post, token):
|
||||
|
||||
|
||||
def main(event):
|
||||
token = get_token_from_aws()
|
||||
token = get_cached_access_token()
|
||||
DEBUG_INFO["event"] = event
|
||||
if event["isBase64Encoded"]:
|
||||
event_data = json.loads(b64decode(event["body"]))
|
||||
|
1
tests/ci/cancel_and_rerun_workflow_lambda/lambda_shared
Symbolic link
@ -0,0 +1 @@
../lambda_shared_package/lambda_shared

@ -1,3 +1 @@
requests<2.30
PyJWT
cryptography<38
../lambda_shared_package[token]
@ -8,32 +8,26 @@ Lambda function to:
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
from collections import namedtuple
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Tuple
|
||||
from typing import Dict, List
|
||||
|
||||
import jwt
|
||||
import requests # type: ignore
|
||||
import boto3 # type: ignore
|
||||
from botocore.exceptions import ClientError # type: ignore
|
||||
|
||||
UNIVERSAL_LABEL = "universal"
|
||||
RUNNER_TYPE_LABELS = [
|
||||
"builder",
|
||||
"func-tester",
|
||||
"func-tester-aarch64",
|
||||
"fuzzer-unit-tester",
|
||||
"stress-tester",
|
||||
"style-checker",
|
||||
"style-checker-aarch64",
|
||||
]
|
||||
|
||||
RunnerDescription = namedtuple(
|
||||
"RunnerDescription", ["id", "name", "tags", "offline", "busy"]
|
||||
from lambda_shared import (
|
||||
RUNNER_TYPE_LABELS,
|
||||
RunnerDescription,
|
||||
RunnerDescriptions,
|
||||
list_runners,
|
||||
)
|
||||
RunnerDescriptions = List[RunnerDescription]
|
||||
from lambda_shared.token import (
|
||||
get_cached_access_token,
|
||||
get_key_and_app_from_aws,
|
||||
get_access_token_by_key_app,
|
||||
)
|
||||
|
||||
UNIVERSAL_LABEL = "universal"
|
||||
|
||||
|
||||
def get_dead_runners_in_ec2(runners: RunnerDescriptions) -> RunnerDescriptions:
|
||||
@ -105,138 +99,53 @@ def get_dead_runners_in_ec2(runners: RunnerDescriptions) -> RunnerDescriptions:
|
||||
def get_lost_ec2_instances(runners: RunnerDescriptions) -> List[dict]:
|
||||
client = boto3.client("ec2")
|
||||
reservations = client.describe_instances(
|
||||
Filters=[{"Name": "tag-key", "Values": ["github:runner-type"]}]
|
||||
Filters=[
|
||||
{"Name": "tag-key", "Values": ["github:runner-type"]},
|
||||
{"Name": "instance-state-name", "Values": ["pending", "running"]},
|
||||
],
|
||||
)["Reservations"]
|
||||
lost_instances = []
|
||||
offline_runners = [
|
||||
runner.name for runner in runners if runner.offline and not runner.busy
|
||||
# flatten the reservation into instances
|
||||
instances = [
|
||||
instance
|
||||
for reservation in reservations
|
||||
for instance in reservation["Instances"]
|
||||
]
|
||||
# Here we refresh the runners to get the most recent state
|
||||
lost_instances = []
|
||||
offline_runner_names = {
|
||||
runner.name for runner in runners if runner.offline and not runner.busy
|
||||
}
|
||||
runner_names = {runner.name for runner in runners}
|
||||
now = datetime.now().timestamp()
|
||||
|
||||
for reservation in reservations:
|
||||
for instance in reservation["Instances"]:
|
||||
# Do not consider instances started 20 minutes ago as problematic
|
||||
if now - instance["LaunchTime"].timestamp() < 1200:
|
||||
continue
|
||||
for instance in instances:
|
||||
# Do not consider instances started 20 minutes ago as problematic
|
||||
if now - instance["LaunchTime"].timestamp() < 1200:
|
||||
continue
|
||||
|
||||
runner_type = [
|
||||
tag["Value"]
|
||||
for tag in instance["Tags"]
|
||||
if tag["Key"] == "github:runner-type"
|
||||
][0]
|
||||
# If there's no necessary labels in runner type it's fine
|
||||
if not (
|
||||
UNIVERSAL_LABEL in runner_type or runner_type in RUNNER_TYPE_LABELS
|
||||
):
|
||||
continue
|
||||
runner_type = [
|
||||
tag["Value"]
|
||||
for tag in instance["Tags"]
|
||||
if tag["Key"] == "github:runner-type"
|
||||
][0]
|
||||
# If there's no necessary labels in runner type it's fine
|
||||
if not (UNIVERSAL_LABEL in runner_type or runner_type in RUNNER_TYPE_LABELS):
|
||||
continue
|
||||
|
||||
if instance["InstanceId"] in offline_runners:
|
||||
lost_instances.append(instance)
|
||||
continue
|
||||
if instance["InstanceId"] in offline_runner_names:
|
||||
lost_instances.append(instance)
|
||||
continue
|
||||
|
||||
if instance["State"]["Name"] == "running" and (
|
||||
not [
|
||||
runner
|
||||
for runner in runners
|
||||
if runner.name == instance["InstanceId"]
|
||||
]
|
||||
):
|
||||
lost_instances.append(instance)
|
||||
if (
|
||||
instance["State"]["Name"] == "running"
|
||||
and not instance["InstanceId"] in runner_names
|
||||
):
|
||||
lost_instances.append(instance)
|
||||
|
||||
return lost_instances
|
||||
|
||||
|
||||
def get_key_and_app_from_aws() -> Tuple[str, int]:
|
||||
secret_name = "clickhouse_github_secret_key"
|
||||
session = boto3.session.Session()
|
||||
client = session.client(
|
||||
service_name="secretsmanager",
|
||||
)
|
||||
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
|
||||
data = json.loads(get_secret_value_response["SecretString"])
|
||||
return data["clickhouse-app-key"], int(data["clickhouse-app-id"])
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
private_key, app_id = get_key_and_app_from_aws()
|
||||
main(private_key, app_id, True, True)
|
||||
|
||||
|
||||
def get_installation_id(jwt_token: str) -> int:
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.get("https://api.github.com/app/installations", headers=headers)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
for installation in data:
|
||||
if installation["account"]["login"] == "ClickHouse":
|
||||
installation_id = installation["id"]
|
||||
break
|
||||
|
||||
return installation_id # type: ignore
|
||||
|
||||
|
||||
def get_access_token(jwt_token: str, installation_id: int) -> str:
|
||||
headers = {
|
||||
"Authorization": f"Bearer {jwt_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
response = requests.post(
|
||||
f"https://api.github.com/app/installations/{installation_id}/access_tokens",
|
||||
headers=headers,
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
return data["token"] # type: ignore
|
||||
|
||||
|
||||
def list_runners(access_token: str) -> RunnerDescriptions:
|
||||
headers = {
|
||||
"Authorization": f"token {access_token}",
|
||||
"Accept": "application/vnd.github.v3+json",
|
||||
}
|
||||
per_page = 100
|
||||
response = requests.get(
|
||||
f"https://api.github.com/orgs/ClickHouse/actions/runners?per_page={per_page}",
|
||||
headers=headers,
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
total_runners = data["total_count"]
|
||||
print("Expected total runners", total_runners)
|
||||
runners = data["runners"]
|
||||
|
||||
# round to 0 for 0, 1 for 1..100, but to 2 for 101..200
|
||||
total_pages = (total_runners - 1) // per_page + 1
|
||||
|
||||
print("Total pages", total_pages)
|
||||
for i in range(2, total_pages + 1):
|
||||
response = requests.get(
|
||||
"https://api.github.com/orgs/ClickHouse/actions/runners"
|
||||
f"?page={i}&per_page={per_page}",
|
||||
headers=headers,
|
||||
)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
runners += data["runners"]
|
||||
|
||||
print("Total runners", len(runners))
|
||||
result = []
|
||||
for runner in runners:
|
||||
tags = [tag["name"] for tag in runner["labels"]]
|
||||
desc = RunnerDescription(
|
||||
id=runner["id"],
|
||||
name=runner["name"],
|
||||
tags=tags,
|
||||
offline=runner["status"] == "offline",
|
||||
busy=runner["busy"],
|
||||
)
|
||||
result.append(desc)
|
||||
|
||||
return result
|
||||
main(get_cached_access_token(), True, True)
|
||||
|
||||
|
||||
def group_runners_by_tag(
|
||||
@ -265,18 +174,21 @@ def group_runners_by_tag(
|
||||
|
||||
|
||||
def push_metrics_to_cloudwatch(
|
||||
listed_runners: RunnerDescriptions, namespace: str
|
||||
listed_runners: RunnerDescriptions, group_name: str
|
||||
) -> None:
|
||||
client = boto3.client("cloudwatch")
|
||||
namespace = "RunnersMetrics"
|
||||
metrics_data = []
|
||||
busy_runners = sum(
|
||||
1 for runner in listed_runners if runner.busy and not runner.offline
|
||||
)
|
||||
dimensions = [{"Name": "group", "Value": group_name}]
|
||||
metrics_data.append(
|
||||
{
|
||||
"MetricName": "BusyRunners",
|
||||
"Value": busy_runners,
|
||||
"Unit": "Count",
|
||||
"Dimensions": dimensions,
|
||||
}
|
||||
)
|
||||
total_active_runners = sum(1 for runner in listed_runners if not runner.offline)
|
||||
@ -285,6 +197,7 @@ def push_metrics_to_cloudwatch(
|
||||
"MetricName": "ActiveRunners",
|
||||
"Value": total_active_runners,
|
||||
"Unit": "Count",
|
||||
"Dimensions": dimensions,
|
||||
}
|
||||
)
|
||||
total_runners = len(listed_runners)
|
||||
@ -293,6 +206,7 @@ def push_metrics_to_cloudwatch(
|
||||
"MetricName": "TotalRunners",
|
||||
"Value": total_runners,
|
||||
"Unit": "Count",
|
||||
"Dimensions": dimensions,
|
||||
}
|
||||
)
|
||||
if total_active_runners == 0:
|
||||
@ -305,6 +219,7 @@ def push_metrics_to_cloudwatch(
|
||||
"MetricName": "BusyRunnersRatio",
|
||||
"Value": busy_ratio,
|
||||
"Unit": "Percent",
|
||||
"Dimensions": dimensions,
|
||||
}
|
||||
)
|
||||
|
||||
@ -327,26 +242,16 @@ def delete_runner(access_token: str, runner: RunnerDescription) -> bool:
|
||||
|
||||
|
||||
def main(
|
||||
github_secret_key: str,
|
||||
github_app_id: int,
|
||||
access_token: str,
|
||||
push_to_cloudwatch: bool,
|
||||
delete_offline_runners: bool,
|
||||
) -> None:
|
||||
payload = {
|
||||
"iat": int(time.time()) - 60,
|
||||
"exp": int(time.time()) + (10 * 60),
|
||||
"iss": github_app_id,
|
||||
}
|
||||
|
||||
encoded_jwt = jwt.encode(payload, github_secret_key, algorithm="RS256")
|
||||
installation_id = get_installation_id(encoded_jwt)
|
||||
access_token = get_access_token(encoded_jwt, installation_id)
|
||||
gh_runners = list_runners(access_token)
|
||||
grouped_runners = group_runners_by_tag(gh_runners)
|
||||
for group, group_runners in grouped_runners.items():
|
||||
if push_to_cloudwatch:
|
||||
print(f"Pushing metrics for group '{group}'")
|
||||
push_metrics_to_cloudwatch(group_runners, "RunnersMetrics/" + group)
|
||||
push_metrics_to_cloudwatch(group_runners, group)
|
||||
else:
|
||||
print(group, f"({len(group_runners)})")
|
||||
for runner in group_runners:
|
||||
@ -408,4 +313,6 @@ if __name__ == "__main__":
|
||||
print("Attempt to get key and id from AWS secret manager")
|
||||
private_key, args.app_id = get_key_and_app_from_aws()
|
||||
|
||||
main(private_key, args.app_id, args.push_to_cloudwatch, args.delete_offline)
|
||||
token = get_access_token_by_key_app(private_key, args.app_id)
|
||||
|
||||
main(token, args.push_to_cloudwatch, args.delete_offline)
|
||||
|
1
tests/ci/ci_runners_metrics_lambda/lambda_shared
Symbolic link
@ -0,0 +1 @@
../lambda_shared_package/lambda_shared

@ -1,3 +1,2 @@
requests<2.30
PyJWT
cryptography<38
../lambda_shared_package
../lambda_shared_package[token]
@ -25,6 +25,7 @@ from stopwatch import Stopwatch
from tee_popen import TeePopen
from upload_result_helper import upload_results
from version_helper import get_version_from_repo
from build_check import get_release_or_pr

JEPSEN_GROUP_NAME = "jepsen_group"

@ -210,12 +211,7 @@ if __name__ == "__main__":

build_name = get_build_name_for_check(check_name)

if pr_info.number == 0:
version = get_version_from_repo()
release_or_pr = f"{version.major}.{version.minor}"
else:
# PR number for anything else
release_or_pr = str(pr_info.number)
release_or_pr, _ = get_release_or_pr(pr_info, get_version_from_repo())

# This check run separately from other checks because it requires exclusive
# run (see .github/workflows/jepsen.yml) So we cannot add explicit
2
tests/ci/lambda_shared_package/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
build
*.egg-info
221
tests/ci/lambda_shared_package/lambda_shared/__init__.py
Normal file
@ -0,0 +1,221 @@
"""The shared code and types for all our CI lambdas
It exists as __init__.py and lambda_shared/__init__.py to work both in local and venv"""

import json
import logging
import time
from collections import namedtuple
from typing import Any, Dict, Iterable, List, Optional

import boto3  # type: ignore
import requests  # type: ignore

RUNNER_TYPE_LABELS = [
    "builder",
    "func-tester",
    "func-tester-aarch64",
    "fuzzer-unit-tester",
    "limited-tester",
    "stress-tester",
    "style-checker",
    "style-checker-aarch64",
]


### VENDORING
def get_parameter_from_ssm(
    name: str, decrypt: bool = True, client: Optional[Any] = None
) -> str:
    if not client:
        client = boto3.client("ssm", region_name="us-east-1")
    return client.get_parameter(Name=name, WithDecryption=decrypt)[  # type: ignore
        "Parameter"
    ]["Value"]


class CHException(Exception):
    pass


class InsertException(CHException):
    pass


class ClickHouseHelper:
    def __init__(
        self,
        url: str,
        user: Optional[str] = None,
        password: Optional[str] = None,
    ):
        self.url = url
        self.auth = {}
        if user:
            self.auth["X-ClickHouse-User"] = user
        if password:
            self.auth["X-ClickHouse-Key"] = password

    @staticmethod
    def _insert_json_str_info_impl(
        url: str, auth: Dict[str, str], db: str, table: str, json_str: str
    ) -> None:
        params = {
            "database": db,
            "query": f"INSERT INTO {table} FORMAT JSONEachRow",
            "date_time_input_format": "best_effort",
            "send_logs_level": "warning",
        }

        for i in range(5):
            try:
                response = requests.post(
                    url, params=params, data=json_str, headers=auth
                )
            except Exception as e:
                error = f"Received exception while sending data to {url} on {i} attempt: {e}"
                logging.warning(error)
                continue

            logging.info("Response content '%s'", response.content)

            if response.ok:
                break

            error = (
                "Cannot insert data into clickhouse at try "
                + str(i)
                + ": HTTP code "
                + str(response.status_code)
                + ": '"
                + str(response.text)
                + "'"
            )

            if response.status_code >= 500:
                # A retriable error
                time.sleep(1)
                continue

            logging.info(
                "Request headers '%s', body '%s'",
                response.request.headers,
                response.request.body,
            )

            raise InsertException(error)
        else:
            raise InsertException(error)

    def _insert_json_str_info(self, db: str, table: str, json_str: str) -> None:
        self._insert_json_str_info_impl(self.url, self.auth, db, table, json_str)

    def insert_event_into(
        self, db: str, table: str, event: object, safe: bool = True
    ) -> None:
        event_str = json.dumps(event)
        try:
            self._insert_json_str_info(db, table, event_str)
        except InsertException as e:
            logging.error(
                "Exception happened during inserting data into clickhouse: %s", e
            )
            if not safe:
                raise

    def insert_events_into(
        self, db: str, table: str, events: Iterable[object], safe: bool = True
    ) -> None:
        jsons = []
        for event in events:
            jsons.append(json.dumps(event))

        try:
            self._insert_json_str_info(db, table, ",".join(jsons))
        except InsertException as e:
            logging.error(
                "Exception happened during inserting data into clickhouse: %s", e
            )
            if not safe:
                raise

    def _select_and_get_json_each_row(self, db: str, query: str) -> str:
        params = {
            "database": db,
            "query": query,
            "default_format": "JSONEachRow",
        }
        for i in range(5):
            response = None
            try:
                response = requests.get(self.url, params=params, headers=self.auth)
                response.raise_for_status()
                return response.text  # type: ignore
            except Exception as ex:
                logging.warning("Cannot fetch data with exception %s", str(ex))
                if response:
                    logging.warning("Reponse text %s", response.text)
                time.sleep(0.1 * i)

        raise CHException("Cannot fetch data from clickhouse")

    def select_json_each_row(self, db: str, query: str) -> List[dict]:
        text = self._select_and_get_json_each_row(db, query)
        result = []
        for line in text.split("\n"):
            if line:
                result.append(json.loads(line))
        return result


### Runners

RunnerDescription = namedtuple(
    "RunnerDescription", ["id", "name", "tags", "offline", "busy"]
)
RunnerDescriptions = List[RunnerDescription]


def list_runners(access_token: str) -> RunnerDescriptions:
    headers = {
        "Authorization": f"token {access_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    per_page = 100
    response = requests.get(
        f"https://api.github.com/orgs/ClickHouse/actions/runners?per_page={per_page}",
        headers=headers,
    )
    response.raise_for_status()
    data = response.json()
    total_runners = data["total_count"]
    print("Expected total runners", total_runners)
    runners = data["runners"]

    # round to 0 for 0, 1 for 1..100, but to 2 for 101..200
    total_pages = (total_runners - 1) // per_page + 1

    print("Total pages", total_pages)
    for i in range(2, total_pages + 1):
        response = requests.get(
            "https://api.github.com/orgs/ClickHouse/actions/runners"
            f"?page={i}&per_page={per_page}",
            headers=headers,
        )
        response.raise_for_status()
        data = response.json()
        runners += data["runners"]

    print("Total runners", len(runners))
    result = []
    for runner in runners:
        tags = [tag["name"] for tag in runner["labels"]]
        desc = RunnerDescription(
            id=runner["id"],
            name=runner["name"],
            tags=tags,
            offline=runner["status"] == "offline",
            busy=runner["busy"],
        )
        result.append(desc)

    return result
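As an illustration of the shared helpers above, a usage sketch (not code from the repository; it assumes lambda_shared is importable via the symlinks or the installed lambda_shared_package, and the ClickHouse URL, credentials, database, and table names are placeholders):

    from lambda_shared import ClickHouseHelper, list_runners

    def report_idle_offline_runners(access_token: str) -> None:
        # list_runners pages through the GitHub API and returns RunnerDescription tuples
        runners = list_runners(access_token)
        offline = [r for r in runners if r.offline and not r.busy]
        ch = ClickHouseHelper("https://ch.example.invalid:8443", user="ci", password="...")  # placeholders
        ch.insert_events_into(
            "default",  # placeholder database
            "runner_events",  # placeholder table
            [{"name": r.name, "tags": ",".join(r.tags)} for r in offline],
        )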
184
tests/ci/lambda_shared_package/lambda_shared/pr.py
Normal file
@ -0,0 +1,184 @@
#!/usr/bin/env python

import re
from typing import Tuple

# Individual trusted contirbutors who are not in any trusted organization.
# Can be changed in runtime: we will append users that we learned to be in
# a trusted org, to save GitHub API calls.
TRUSTED_CONTRIBUTORS = {
    e.lower()
    for e in [
        "achimbab",
        "adevyatova ",  # DOCSUP
        "Algunenano",  # Raúl Marín, Tinybird
        "amosbird",
        "AnaUvarova",  # DOCSUP
        "anauvarova",  # technical writer, Yandex
        "annvsh",  # technical writer, Yandex
        "atereh",  # DOCSUP
        "azat",
        "bharatnc",  # Newbie, but already with many contributions.
        "bobrik",  # Seasoned contributor, CloudFlare
        "BohuTANG",
        "codyrobert",  # Flickerbox engineer
        "cwurm",  # Employee
        "damozhaeva",  # DOCSUP
        "den-crane",
        "flickerbox-tom",  # Flickerbox
        "gyuton",  # DOCSUP
        "hagen1778",  # Roman Khavronenko, seasoned contributor
        "hczhcz",
        "hexiaoting",  # Seasoned contributor
        "ildus",  # adjust, ex-pgpro
        "javisantana",  # a Spanish ClickHouse enthusiast, ex-Carto
        "ka1bi4",  # DOCSUP
        "kirillikoff",  # DOCSUP
        "kreuzerkrieg",
        "lehasm",  # DOCSUP
        "michon470",  # DOCSUP
        "nikvas0",
        "nvartolomei",
        "olgarev",  # DOCSUP
        "otrazhenia",  # Yandex docs contractor
        "pdv-ru",  # DOCSUP
        "podshumok",  # cmake expert from QRator Labs
        "s-mx",  # Maxim Sabyanin, former employee, present contributor
        "sevirov",  # technical writer, Yandex
        "spongedu",  # Seasoned contributor
        "taiyang-li",
        "ucasFL",  # Amos Bird's friend
        "vdimir",  # Employee
        "vzakaznikov",
        "YiuRULE",
        "zlobober",  # Developer of YT
        "ilejn",  # Arenadata, responsible for Kerberized Kafka
        "thomoco",  # ClickHouse
        "BoloniniD",  # Seasoned contributor, HSE
        "tonickkozlov",  # Cloudflare
        "tylerhannan",  # ClickHouse Employee
        "myrrc",  # Mike Kot, DoubleCloud
        "thevar1able",  # ClickHouse Employee
        "aalexfvk",
        "MikhailBurdukov",
        "tsolodov",  # ClickHouse Employee
        "kitaisreal",
    ]
}

# Descriptions are used in .github/PULL_REQUEST_TEMPLATE.md, keep comments there
# updated accordingly
# The following lists are append only, try to avoid editing them
# They still could be cleaned out after the decent time though.
LABELS = {
    "pr-backward-incompatible": ["Backward Incompatible Change"],
    "pr-bugfix": [
        "Bug Fix",
        "Bug Fix (user-visible misbehavior in an official stable release)",
        "Bug Fix (user-visible misbehaviour in official stable or prestable release)",
        "Bug Fix (user-visible misbehavior in official stable or prestable release)",
    ],
    "pr-build": [
        "Build/Testing/Packaging Improvement",
        "Build Improvement",
        "Build/Testing Improvement",
        "Build",
        "Packaging Improvement",
    ],
    "pr-documentation": [
        "Documentation (changelog entry is not required)",
        "Documentation",
    ],
    "pr-feature": ["New Feature"],
    "pr-improvement": ["Improvement"],
    "pr-not-for-changelog": [
        "Not for changelog (changelog entry is not required)",
        "Not for changelog",
    ],
    "pr-performance": ["Performance Improvement"],
}

CATEGORY_TO_LABEL = {c: lb for lb, categories in LABELS.items() for c in categories}


def check_pr_description(pr_body: str) -> Tuple[str, str]:
    """The function checks the body to being properly formatted according to
    .github/PULL_REQUEST_TEMPLATE.md, if the first returned string is not empty,
    then there is an error."""
    lines = list(map(lambda x: x.strip(), pr_body.split("\n") if pr_body else []))
    lines = [re.sub(r"\s+", " ", line) for line in lines]

    # Check if body contains "Reverts ClickHouse/ClickHouse#36337"
    if [
        True
        for line in lines
        if re.match(r"\AReverts {GITHUB_REPOSITORY}#[\d]+\Z", line)
    ]:
        return "", LABELS["pr-not-for-changelog"][0]

    category = ""
    entry = ""
    description_error = ""

    i = 0
    while i < len(lines):
        if re.match(r"(?i)^[#>*_ ]*change\s*log\s*category", lines[i]):
            i += 1
            if i >= len(lines):
                break
            # Can have one empty line between header and the category
            # itself. Filter it out.
            if not lines[i]:
                i += 1
                if i >= len(lines):
                    break
            category = re.sub(r"^[-*\s]*", "", lines[i])
            i += 1

            # Should not have more than one category. Require empty line
            # after the first found category.
            if i >= len(lines):
                break
            if lines[i]:
                second_category = re.sub(r"^[-*\s]*", "", lines[i])
                description_error = (
                    "More than one changelog category specified: "
                    f"'{category}', '{second_category}'"
                )
                return description_error, category

        elif re.match(
            r"(?i)^[#>*_ ]*(short\s*description|change\s*log\s*entry)", lines[i]
        ):
            i += 1
            # Can have one empty line between header and the entry itself.
            # Filter it out.
            if i < len(lines) and not lines[i]:
                i += 1
            # All following lines until empty one are the changelog entry.
            entry_lines = []
            while i < len(lines) and lines[i]:
                entry_lines.append(lines[i])
                i += 1
            entry = " ".join(entry_lines)
            # Don't accept changelog entries like '...'.
            entry = re.sub(r"[#>*_.\- ]", "", entry)
            # Don't accept changelog entries like 'Close #12345'.
            entry = re.sub(r"^[\w\-\s]{0,10}#?\d{5,6}\.?$", "", entry)
        else:
            i += 1

    if not category:
        description_error = "Changelog category is empty"
    # Filter out the PR categories that are not for changelog.
    elif re.match(
        r"(?i)doc|((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)",
        category,
    ):
        pass  # to not check the rest of the conditions
    elif category not in CATEGORY_TO_LABEL:
        description_error, category = f"Category '{category}' is not valid", ""
    elif not entry:
        description_error = f"Changelog entry required for category '{category}'"

    return description_error, category
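A short usage sketch for check_pr_description (illustrative only; the PR body below is an example, and the import assumes lambda_shared is on the path via the symlinks or the installed lambda_shared_package):

    from lambda_shared.pr import CATEGORY_TO_LABEL, check_pr_description

    pr_body = """### Changelog category (leave one):
    - Improvement

    ### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
    Package the shared CI lambda code once instead of copying it.
    """

    error, category = check_pr_description(pr_body)
    if error:
        print("Bad description:", error)
    else:
        print("Category:", category, "->", CATEGORY_TO_LABEL.get(category))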
90
tests/ci/lambda_shared_package/lambda_shared/token.py
Normal file
@ -0,0 +1,90 @@
"""Module to get the token for GitHub"""
from dataclasses import dataclass
import json
import time
from typing import Tuple

import boto3  # type: ignore
import jwt
import requests  # type: ignore


def get_key_and_app_from_aws() -> Tuple[str, int]:
    secret_name = "clickhouse_github_secret_key"
    session = boto3.session.Session()
    client = session.client(
        service_name="secretsmanager",
    )
    get_secret_value_response = client.get_secret_value(SecretId=secret_name)
    data = json.loads(get_secret_value_response["SecretString"])
    return data["clickhouse-app-key"], int(data["clickhouse-app-id"])


def get_installation_id(jwt_token: str) -> int:
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.get("https://api.github.com/app/installations", headers=headers)
    response.raise_for_status()
    data = response.json()
    for installation in data:
        if installation["account"]["login"] == "ClickHouse":
            installation_id = installation["id"]

    return installation_id  # type: ignore


def get_access_token_by_jwt(jwt_token: str, installation_id: int) -> str:
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.post(
        f"https://api.github.com/app/installations/{installation_id}/access_tokens",
        headers=headers,
    )
    response.raise_for_status()
    data = response.json()
    return data["token"]  # type: ignore


def get_token_from_aws() -> str:
    private_key, app_id = get_key_and_app_from_aws()
    return get_access_token_by_key_app(private_key, app_id)


def get_access_token_by_key_app(private_key: str, app_id: int) -> str:
    payload = {
        "iat": int(time.time()) - 60,
        "exp": int(time.time()) + (10 * 60),
        "iss": app_id,
    }

    encoded_jwt = jwt.encode(payload, private_key, algorithm="RS256")
    installation_id = get_installation_id(encoded_jwt)
    return get_access_token_by_jwt(encoded_jwt, installation_id)


@dataclass
class CachedToken:
    time: int
    value: str
    updating: bool = False


_cached_token = CachedToken(0, "")


def get_cached_access_token() -> str:
    if time.time() - 550 < _cached_token.time or _cached_token.updating:
        return _cached_token.value
    # Indicate that the value is updating now, so the cached value can be
    # used. The first setting and close-to-ttl are not counted as update
    if _cached_token.time != 0 or time.time() - 590 < _cached_token.time:
        _cached_token.updating = True
    private_key, app_id = get_key_and_app_from_aws()
    _cached_token.time = int(time.time())
    _cached_token.value = get_access_token_by_key_app(private_key, app_id)
    _cached_token.updating = False
    return _cached_token.value
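For context, a minimal sketch of how a lambda handler might consume the cached token (the handler body is an assumption, not code from the diff):

    from lambda_shared.token import get_cached_access_token

    def handler(event, context):
        # Reused across warm invocations; refreshed only when the cached value
        # falls outside the 550/590-second window enforced above.
        access_token = get_cached_access_token()
        # ... call the GitHub API with access_token ...
        return {"statusCode": 200}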
24
tests/ci/lambda_shared_package/pyproject.toml
Normal file
@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

[project]
name = "lambda_shared"
version = "0.0.1"
dependencies = [
    "requests",
    "urllib3 < 2"
]

[project.optional-dependencies]
token = [
    "PyJWT",
    "cryptography",
]
dev = [
    "boto3",
    "lambda_shared[token]",
]

[tool.distutils.bdist_wheel]
universal = true
8
tests/ci/lambda_shared_package/setup.cfg
Normal file
@ -0,0 +1,8 @@
### This file exists for clear builds in docker ###
# without it the `build` directory wouldn't be #
# updated on the fly and will require manual clean #
[build]
build_base = /tmp/lambda_shared

[egg_info]
egg_base = /tmp/
@ -219,6 +219,12 @@ if __name__ == "__main__":
    except Exception:
        traceback.print_exc()

    def too_many_slow(msg):
        match = re.search(r"(|.* )(\d+) slower.*", msg)
        # This threshold should be synchronized with the value in https://github.com/ClickHouse/ClickHouse/blob/master/docker/test/performance-comparison/report.py#L629
        threshold = 5
        return int(match.group(2).strip()) > threshold if match else False

    # Try to fetch status from the report.
    status = ""
    message = ""
@ -236,7 +242,7 @@ if __name__ == "__main__":

        # TODO: Remove me, always green mode for the first time, unless errors
        status = "success"
        if "errors" in message.lower():
        if "errors" in message.lower() or too_many_slow(message.lower()):
            status = "failure"
        # TODO: Remove until here
    except Exception:
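A quick illustrative check of the helper added above, with values chosen to bracket the threshold of 5 (shown as if too_many_slow were at module scope):

    assert too_many_slow("10 queries, 6 slower") is True   # 6 > 5 -> mark as failure
    assert too_many_slow("3 slower") is False               # at or below the threshold
    assert too_many_slow("all queries are fine") is False   # no match -> not slow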
@ -20,9 +20,11 @@ from docs_check import NAME as DOCS_NAME
from env_helper import GITHUB_REPOSITORY, GITHUB_SERVER_URL
from get_robot_token import get_best_robot_token
from pr_info import FORCE_TESTS_LABEL, PRInfo

from cancel_and_rerun_workflow_lambda.app import CATEGORY_TO_LABEL, check_pr_description
from workflow_approve_rerun_lambda.app import TRUSTED_CONTRIBUTORS
from lambda_shared_package.lambda_shared.pr import (
    CATEGORY_TO_LABEL,
    TRUSTED_CONTRIBUTORS,
    check_pr_description,
)

TRUSTED_ORG_IDS = {
    54801242,  # clickhouse
@ -2,40 +2,11 @@

import argparse
import sys
import json
import time

import boto3  # type: ignore
import jwt
import requests  # type: ignore


def get_installation_id(jwt_token):
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.get("https://api.github.com/app/installations", headers=headers)
    response.raise_for_status()
    data = response.json()
    for installation in data:
        if installation["account"]["login"] == "ClickHouse":
            installation_id = installation["id"]
    return installation_id


def get_access_token(jwt_token, installation_id):
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    response = requests.post(
        f"https://api.github.com/app/installations/{installation_id}/access_tokens",
        headers=headers,
    )
    response.raise_for_status()
    data = response.json()
    return data["token"]
from lambda_shared.token import get_cached_access_token, get_access_token_by_key_app


def get_runner_registration_token(access_token):
@ -52,32 +23,10 @@ def get_runner_registration_token(access_token):
    return data["token"]


def get_key_and_app_from_aws():
    secret_name = "clickhouse_github_secret_key"
    session = boto3.session.Session()
    client = session.client(
        service_name="secretsmanager",
    )
    get_secret_value_response = client.get_secret_value(SecretId=secret_name)
    data = json.loads(get_secret_value_response["SecretString"])
    return data["clickhouse-app-key"], int(data["clickhouse-app-id"])


def main(github_secret_key, github_app_id, push_to_ssm, ssm_parameter_name):
    payload = {
        "iat": int(time.time()) - 60,
        "exp": int(time.time()) + (10 * 60),
        "iss": github_app_id,
    }

    encoded_jwt = jwt.encode(payload, github_secret_key, algorithm="RS256")
    installation_id = get_installation_id(encoded_jwt)
    access_token = get_access_token(encoded_jwt, installation_id)
def main(access_token, push_to_ssm, ssm_parameter_name):
    runner_registration_token = get_runner_registration_token(access_token)

    if push_to_ssm:
        import boto3

        print("Trying to put params into ssm manager")
        client = boto3.client("ssm")
        client.put_parameter(
@ -94,8 +43,7 @@ def main(github_secret_key, github_app_id, push_to_ssm, ssm_parameter_name):


def handler(event, context):
    private_key, app_id = get_key_and_app_from_aws()
    main(private_key, app_id, True, "github_runner_registration_token")
    main(get_cached_access_token(), True, "github_runner_registration_token")


if __name__ == "__main__":
@ -140,4 +88,5 @@ if __name__ == "__main__":
        with open(args.private_key_path, "r") as key_file:
            private_key = key_file.read()

    main(private_key, args.app_id, args.push_to_ssm, args.ssm_parameter_name)
    token = get_access_token_by_key_app(private_key, args.app_id)
    main(token, args.push_to_ssm, args.ssm_parameter_name)

1
tests/ci/runner_token_rotation_lambda/lambda_shared
Symbolic link
@ -0,0 +1 @@
../lambda_shared_package/lambda_shared
Some files were not shown because too many files have changed in this diff