Merge remote-tracking branch 'upstream/master' into HEAD
Commit: b7fccd8617
@@ -30,7 +30,6 @@
* Support more variants of JOIN strictness (`LEFT/RIGHT SEMI/ANTI/ANY JOIN`) with inequality conditions which involve columns from both the left and right table, e.g. `t1.y < t2.y` (see the setting `allow_experimental_join_condition`). [#64281](https://github.com/ClickHouse/ClickHouse/pull/64281) ([lgbo](https://github.com/lgbo-ustc)).
* Interpret Hive-style partitioning for different engines (`File`, `URL`, `S3`, `AzureBlobStorage`, `HDFS`). Hive-style partitioning organizes data into partitioned sub-directories, making it efficient to query and manage large datasets. Currently, it only creates virtual columns with the appropriate name and data. A follow-up PR will introduce the appropriate data filtering (performance speedup). [#65997](https://github.com/ClickHouse/ClickHouse/pull/65997) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add function `printf` for Spark compatibility (but you can use the existing `format` function). [#66257](https://github.com/ClickHouse/ClickHouse/pull/66257) ([李扬](https://github.com/taiyang-li)).
* Added a new server setting, `disable_insertion_and_mutation`. If it is enabled, the server will deny all insertions and mutations, including asynchronous INSERTs. This setting can be used to create read-only replicas. [#66519](https://github.com/ClickHouse/ClickHouse/pull/66519) ([Xu Jia](https://github.com/XuJia0210)).
* Add options `restore_replace_external_engines_to_null` and `restore_replace_external_table_functions_to_null` to replace external engines and table functions with the `Null` engine, which can be useful for testing. It works for RESTORE and explicit table creation. [#66536](https://github.com/ClickHouse/ClickHouse/pull/66536) ([Ilya Yatsishin](https://github.com/qoega)).
* Added support for reading `MULTILINESTRING` geometry in `WKT` format using the function `readWKTLineString`. [#67647](https://github.com/ClickHouse/ClickHouse/pull/67647) ([Jacob Reckhard](https://github.com/jacobrec)).
* Add a new table function `fuzzQuery`. This function allows the modification of a given query string with random variations. Example: `SELECT query FROM fuzzQuery('SELECT 1') LIMIT 5;`. [#67655](https://github.com/ClickHouse/ClickHouse/pull/67655) ([pufit](https://github.com/pufit)).
@@ -56,6 +56,15 @@ Other upcoming meetups
* [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10
* [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17
* [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17
* [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29
* [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26

## Recent Recordings

* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@@ -18,7 +18,9 @@
#define Net_HTTPResponse_INCLUDED


#include <map>
#include <vector>

#include "Poco/Net/HTTPCookie.h"
#include "Poco/Net/HTTPMessage.h"
#include "Poco/Net/Net.h"
@@ -180,6 +182,8 @@ namespace Net
/// May throw an exception in case of a malformed
/// Set-Cookie header.

void getHeaders(std::map<std::string, std::string> & headers) const;

void write(std::ostream & ostr) const;
/// Writes the HTTP response to the given
/// output stream.
@@ -209,6 +209,15 @@ void HTTPResponse::getCookies(std::vector<HTTPCookie>& cookies) const
}
}

void HTTPResponse::getHeaders(std::map<std::string, std::string> & headers) const
{
headers.clear();
for (const auto & it : *this)
{
headers.emplace(it.first, it.second);
}
}


void HTTPResponse::write(std::ostream& ostr) const
{
contrib/libfiu (vendored)
@@ -1 +1 @@
Subproject commit b85edbde4cf974b1b40d27828a56f0505f4e2ee5
Subproject commit a1290d8cd3d7b4541d6c976e0a54f572ac03f2a3

contrib/usearch (vendored)
@@ -1 +1 @@
Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4
Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
@@ -112,3 +112,5 @@ wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
zipp==1.0.0
deltalake==0.16.0
@@ -13,7 +13,8 @@ entry="/usr/share/clickhouse-test/performance/scripts/entrypoint.sh"
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
numactl --hardware
echo > compare.log
numactl --hardware | tee -a compare.log
node=$(( RANDOM % $(numactl --hardware | sed -n 's/^.*available:\(.*\)nodes.*$/\1/p') ));
echo Will bind to NUMA node $node;
echo Will bind to NUMA node $node | tee -a compare.log
numactl --cpunodebind=$node --membind=$node $entry
@@ -6,28 +6,34 @@ sidebar_label: Iceberg

# Iceberg Table Engine

This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, and locally stored tables.

## Create Table

Note that the Iceberg table must already exist in S3, this command does not take DDL parameters to create a new table.
Note that the Iceberg table must already exist in the storage; this command does not take DDL parameters to create a new table.

``` sql
CREATE TABLE iceberg_table
ENGINE = Iceberg(url, [aws_access_key_id, aws_secret_access_key,])
CREATE TABLE iceberg_table_s3
ENGINE = IcebergS3(url, [, NOSIGN | access_key_id, secret_access_key, [session_token]], format, [,compression])

CREATE TABLE iceberg_table_azure
ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])

CREATE TABLE iceberg_table_local
ENGINE = IcebergLocal(path_to_table, [,format] [,compression_method])
```

**Engine parameters**
**Engine arguments**

- `url` — url with the path to an existing Iceberg table.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file.
The description of the arguments matches the description of the arguments for the `S3`, `AzureBlobStorage`, and `File` engines, respectively.
`format` stands for the format of data files in the Iceberg table.

Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md).

**Example**

```sql
CREATE TABLE iceberg_table ENGINE=Iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
CREATE TABLE iceberg_table ENGINE=IcebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```

Using named collections:
@@ -45,9 +51,15 @@ Using named collections:
```

```sql
CREATE TABLE iceberg_table ENGINE=Iceberg(iceberg_conf, filename = 'test_table')
CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table')
```
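
For illustration, here is a hedged sketch of using the new `IcebergLocal` engine; the path below is an assumption for the example, not taken from this PR:

```sql
-- Assumes an Iceberg table already exists at this local path (illustrative).
CREATE TABLE iceberg_table_local_example
ENGINE = IcebergLocal('/var/lib/clickhouse/user_files/iceberg/my_table');
```
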
**Aliases**

Table engine `Iceberg` is now an alias for `IcebergS3`.

## See also

- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)
@@ -54,7 +54,7 @@ Parameters:
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
  line between two points in Euclidean space), or `cosineDistance` (the [cosine
  distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance) - the angle between two non-zero vectors).
- `quantization`: either `f32`, `f16`, or `i8` for storing the vector with reduced precision (optional, default: `f32`)
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
- `m`: the number of neighbors per graph node (optional, default: 16)
- `ef_construction`: (optional, default: 128)
- `ef_search`: (optional, default: 64)
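
As an illustration, a hedged sketch of how such an index might be declared (table and column names are made up; the optional parameters listed above can be appended to `vector_similarity` in the order they are listed):

```sql
CREATE TABLE vectors
(
    id UInt64,
    vec Array(Float32),
    -- minimal form; quantization, m, ef_construction and ef_search are optional extras
    INDEX idx_vec vec TYPE vector_similarity('hnsw', 'L2Distance')
)
ENGINE = MergeTree
ORDER BY id;
```
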
@@ -109,6 +109,7 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` — HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
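
A hedged example of reading these virtual columns (the table name is illustrative and assumes a table backed by the `URL` engine):

```sql
SELECT _file, _size, _time, _headers['Content-Type'] AS content_type
FROM url_engine_table
LIMIT 5;
```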

## Storage Settings {#storage-settings}
@@ -1389,7 +1389,7 @@ DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : nul
#### schema_inference_make_columns_nullable

Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will be `Nullable` only if `input_format_null_as_default` is disabled and the column contains `NULL` in a sample that is parsed during schema inference.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference or the file metadata contains information about column nullability.

Enabled by default.

@@ -1412,15 +1412,13 @@ DESC format(JSONEachRow, $$
└─────────┴─────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
```sql
SET schema_inference_make_columns_nullable = 0;
SET input_format_null_as_default = 0;
SET schema_inference_make_columns_nullable = 'auto';
DESC format(JSONEachRow, $$
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
$$)
```
```response
┌─name────┬─type─────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ id │ Int64 │ │ │ │ │ │
│ age │ Int64 │ │ │ │ │ │
@@ -1432,7 +1430,6 @@ DESC format(JSONEachRow, $$

```sql
SET schema_inference_make_columns_nullable = 0;
SET input_format_null_as_default = 1;
DESC format(JSONEachRow, $$
{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}
{"id" : 2, "age" : 19, "name" : "Alan", "status" : "married", "hobbies" : ["tennis", "art"]}
@@ -171,8 +171,8 @@ If the `schema_inference_hints` is not formatted properly, or if there is a typo

## schema_inference_make_columns_nullable {#schema_inference_make_columns_nullable}

Controls making inferred types `Nullable` in schema inference for formats without information about nullability.
If the setting is enabled, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference.
Controls making inferred types `Nullable` in schema inference.
If the setting is enabled, all inferred types will be `Nullable`; if disabled, the inferred type will never be `Nullable`; if set to `auto`, the inferred type will be `Nullable` only if the column contains `NULL` in a sample that is parsed during schema inference or the file metadata contains information about column nullability.

Default value: `true`.
@@ -5633,7 +5633,6 @@ Default value: `1GiB`.
## use_json_alias_for_old_object_type

When enabled, the `JSON` data type alias will be used to create an old [Object('json')](../../sql-reference/data-types/json.md) type instead of the new [JSON](../../sql-reference/data-types/newjson.md) type.
This setting requires server restart to take effect when changed.

Default value: `false`.
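
A hedged sketch of what this looks like in practice (the table name is illustrative; depending on the version, `allow_experimental_object_type` may also need to be enabled):

```sql
SET use_json_alias_for_old_object_type = 1;
-- With the setting enabled, the JSON alias maps to the old Object('json') type
-- rather than the new JSON type.
CREATE TABLE legacy_json_table (data JSON) ENGINE = Memory;
```
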
docs/en/operations/system-tables/projections.md (new file, 41 lines)
@@ -0,0 +1,41 @@
---
slug: /en/operations/system-tables/projections
---
# projections

Contains information about existing projections in all the tables.

Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — Database name.
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Projection name.
- `type` ([Enum](../../sql-reference/data-types/enum.md)) — Projection type ('Normal' = 0, 'Aggregate' = 1).
- `sorting_key` ([Array(String)](../../sql-reference/data-types/array.md)) — Projection sorting key.
- `query` ([String](../../sql-reference/data-types/string.md)) — Projection query.

**Example**

```sql
SELECT * FROM system.projections LIMIT 2 FORMAT Vertical;
```

```text
Row 1:
──────
database: default
table: landing
name: improved_sorting_key
type: Normal
sorting_key: ['user_id','date']
query: SELECT * ORDER BY user_id, date

Row 2:
──────
database: default
table: landing
name: agg_no_key
type: Aggregate
sorting_key: []
query: SELECT count()
```
@@ -49,7 +49,7 @@ Result:

## multiIf

Allows writing the [CASE](../../sql-reference/operators/index.md#operator_case) operator more compactly in the query.
Allows writing the [CASE](../../sql-reference/operators/index.md#conditional-expression) operator more compactly in the query.
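
As a quick illustration (this is a usage sketch, not the formal syntax block that follows):

```sql
SELECT number,
       multiIf(number < 3, 'low', number < 7, 'mid', 'high') AS bucket
FROM system.numbers
LIMIT 10;
```
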
**Syntax**

@@ -264,4 +264,4 @@ SELECT clamp(1, 2, 3) result, toTypeName(result) type;
┌─result─┬─type────┐
│ 2 │ Float64 │
└────────┴─────────┘
```
```
@@ -4287,7 +4287,7 @@ Result:

## fromModifiedJulianDay

Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973483` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
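
For example (a quick sanity check, assuming the standard Modified Julian Day epoch of 1858-11-17):

```sql
SELECT fromModifiedJulianDay(58849);   -- 2020-01-01
SELECT fromModifiedJulianDay(2973483); -- 9999-12-31, the corrected upper bound
```
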
**Syntax**
@@ -688,6 +688,40 @@ SELECT kostikConsistentHash(16045690984833335023, 2);
└───────────────────────────────────────────────┘
```

## ripeMD160

Produces the [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash value.

**Syntax**

```sql
ripeMD160(input)
```

**Parameters**

- `input`: Input string. [String](../data-types/string.md)

**Returned value**

- A [UInt256](../data-types/int-uint.md) hash value where the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are zero-padded.

**Example**

Use the [hex](../functions/encoding-functions.md/#hex) function to represent the result as a hex-encoded string.

Query:

```sql
SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
```

```response
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
└───────────────────────────────────────────────────────────────┘
```

## murmurHash2_32, murmurHash2_64

Produces a [MurmurHash2](https://github.com/aappleby/smhasher) hash value.
@@ -8,7 +8,7 @@ title: "CREATE ROW POLICY"
Creates a [row policy](../../../guides/sre/user-management/index.md#row-policy-management), i.e. a filter used to determine which rows a user can read from a table.

:::tip
Row policies makes sense only for users with readonly access. If user can modify table or copy partitions between tables, it defeats the restrictions of row policies.
Row policies make sense only for users with readonly access. If a user can modify a table or copy partitions between tables, it defeats the restrictions of row policies.
:::

Syntax:
@@ -24,40 +24,40 @@ CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluste

## USING Clause

Allows to specify a condition to filter rows. An user will see a row if the condition is calculated to non-zero for the row.
Allows specifying a condition to filter rows. A user will see a row if the condition is calculated to non-zero for the row.

## TO Clause

In the section `TO` you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.
In the `TO` section you can provide a list of users and roles this policy should work for. For example, `CREATE ROW POLICY ... TO accountant, john@localhost`.

Keyword `ALL` means all the ClickHouse users including current user. Keyword `ALL EXCEPT` allow to exclude some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`
Keyword `ALL` means all the ClickHouse users, including current user. Keyword `ALL EXCEPT` allows excluding some users from the all users list, for example, `CREATE ROW POLICY ... TO ALL EXCEPT accountant, john@localhost`

:::note
If there are no row policies defined for a table then any user can `SELECT` all the row from the table. Defining one or more row policies for the table makes the access to the table depending on the row policies no matter if those row policies are defined for the current user or not. For example, the following policy
If there are no row policies defined for a table, then any user can `SELECT` all the rows from the table. Defining one or more row policies for the table makes access to the table dependent on the row policies, no matter if those row policies are defined for the current user or not. For example, the following policy:

`CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter`

forbids the users `mira` and `peter` to see the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.
forbids the users `mira` and `peter` from seeing the rows with `b != 1`, and any non-mentioned user (e.g., the user `paul`) will see no rows from `mydb.table1` at all.

If that's not desirable it can't be fixed by adding one more row policy, like the following:
If that's not desirable, it can be fixed by adding one more row policy, like the following:

`CREATE ROW POLICY pol2 ON mydb.table1 USING 1 TO ALL EXCEPT mira, peter`
:::

## AS Clause

It's allowed to have more than one policy enabled on the same table for the same user at the one time. So we need a way to combine the conditions from multiple policies.
It's allowed to have more than one policy enabled on the same table for the same user at one time. So we need a way to combine the conditions from multiple policies.

By default policies are combined using the boolean `OR` operator. For example, the following policies
By default, policies are combined using the boolean `OR` operator. For example, the following policies:

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 TO peter, antonio
```

enables the user `peter` to see rows with either `b=1` or `c=2`.
enable the user `peter` to see rows with either `b=1` or `c=2`.

The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default policies are permissive, which means they are combined using the boolean `OR` operator.
The `AS` clause specifies how policies should be combined with other policies. Policies can be either permissive or restrictive. By default, policies are permissive, which means they are combined using the boolean `OR` operator.

A policy can be defined as restrictive as an alternative. Restrictive policies are combined using the boolean `AND` operator.

@@ -68,25 +68,25 @@ row_is_visible = (one or more of the permissive policies' conditions are non-zer
(all of the restrictive policies' conditions are non-zero)
```

For example, the following policies
For example, the following policies:

``` sql
CREATE ROW POLICY pol1 ON mydb.table1 USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```

enables the user `peter` to see rows only if both `b=1` AND `c=2`.
enable the user `peter` to see rows only if both `b=1` AND `c=2`.

Database policies are combined with table policies.

For example, the following policies
For example, the following policies:

``` sql
CREATE ROW POLICY pol1 ON mydb.* USING b=1 TO mira, peter
CREATE ROW POLICY pol2 ON mydb.table1 USING c=2 AS RESTRICTIVE TO peter, antonio
```

enables the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
enable the user `peter` to see table1 rows only if both `b=1` AND `c=2`, although
any other table in mydb would have only `b=1` policy applied for the user.
@@ -6,35 +6,37 @@ sidebar_label: iceberg

# iceberg Table Function

Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3.
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure or locally stored.

## Syntax

``` sql
iceberg(url [,aws_access_key_id, aws_secret_access_key] [,format] [,structure])
icebergS3(url [, NOSIGN | access_key_id, secret_access_key, [session_token]] [,format] [,compression_method])
icebergS3(named_collection[, option=value [,..]])

icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzure(named_collection[, option=value [,..]])

icebergLocal(path_to_table, [,format] [,compression_method])
icebergLocal(named_collection[, option=value [,..]])
```

## Arguments

- `url` — Bucket url with the path to an existing Iceberg table in S3.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. These parameters are optional. If credentials are not specified, they are used from the ClickHouse configuration. For more information see [Using S3 for Data Storage](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3).
- `format` — The [format](/docs/en/interfaces/formats.md/#formats) of the file. By default `Parquet` is used.
- `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.

Engine parameters can be specified using [Named Collections](/docs/en/operations/named-collections.md).
The description of the arguments matches the description of the arguments for the table functions `s3`, `azureBlobStorage` and `file`, respectively.
`format` stands for the format of data files in the Iceberg table.

**Returned value**

A table with the specified structure for reading data in the specified Iceberg table in S3.
A table with the specified structure for reading data in the specified Iceberg table.

**Example**

```sql
SELECT * FROM iceberg('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_table', 'test', 'test')
```
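
Similarly, a hedged sketch of reading a locally stored Iceberg table via the new `icebergLocal` function (the path below is an assumption for illustration only):

```sql
SELECT count() FROM icebergLocal('/var/lib/clickhouse/user_files/iceberg/my_table');
```
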
:::important
ClickHouse currently supports reading v1 (v2 support is coming soon!) of the Iceberg format via the `iceberg` table function and `Iceberg` table engine.
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure` and `icebergLocal` table functions and the `IcebergS3`, `IcebergAzure` and `IcebergLocal` table engines.
:::

## Defining a named collection
@@ -56,10 +58,14 @@ Here is an example of configuring a named collection for storing the URL and cre
```

```sql
SELECT * FROM iceberg(iceberg_conf, filename = 'test_table')
DESCRIBE iceberg(iceberg_conf, filename = 'test_table')
SELECT * FROM icebergS3(iceberg_conf, filename = 'test_table')
DESCRIBE icebergS3(iceberg_conf, filename = 'test_table')
```

**Aliases**

Table function `iceberg` is now an alias for `icebergS3`.

**See Also**

- [Iceberg engine](/docs/en/engines/table-engines/integrations/iceberg.md)
@@ -54,6 +54,7 @@ Character `|` inside patterns is used to specify failover addresses. They are it
- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
- `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`.
- `_headers` — HTTP response headers. Type: `Map(LowCardinality(String), LowCardinality(String))`.
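
A hedged example of inspecting the new `_headers` virtual column (URL and format are illustrative):

```sql
SELECT _headers['Content-Type'] AS content_type, _size
FROM url('https://example.com/data.json', JSONEachRow)
LIMIT 1;
```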

## Hive-style partitioning {#hive-style-partitioning}
@@ -124,6 +124,40 @@ SELECT hex(sipHash128('foo', '\x01', 3));
└──────────────────────────────────┘
```

## ripeMD160

Produces the [RIPEMD-160](https://en.wikipedia.org/wiki/RIPEMD) hash of a string.

**Syntax**

```sql
ripeMD160(input)
```

**Arguments**

- `input`: Input string. [String](../data-types/string.md)

**Returned value**

- A [UInt256](../data-types/int-uint.md) value in which the 160-bit RIPEMD-160 hash is stored in the first 20 bytes. The remaining 12 bytes are zero-padded.

**Example**

Use the [hex](../functions/encoding-functions.md#hex) function to represent the result as a hex-encoded string.

Query:

```sql
SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));
```
Result:
```response
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
└───────────────────────────────────────────────────────────────┘
```

## cityHash64 {#cityhash64}

Produces a 64-bit [CityHash](https://github.com/google/cityhash) hash value.
@@ -280,7 +280,7 @@ SYSTEM START REPLICATION QUEUES [ON CLUSTER cluster_name] [[db.]replicated_merge
Waits until a table of the `ReplicatedMergeTree` family is synchronized with the other replicas in the cluster, but no longer than `receive_timeout` seconds:

``` sql
SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
SYSTEM SYNC REPLICA [ON CLUSTER cluster_name] [db.]replicated_merge_tree_family_table_name [STRICT | LIGHTWEIGHT [FROM 'srcReplica1'[, 'srcReplica2'[, ...]]] | PULL]
```
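
For example, a hedged sketch using the newly documented `ON CLUSTER` clause (cluster and table names are illustrative):

```sql
SYSTEM SYNC REPLICA ON CLUSTER my_cluster db.replicated_table LIGHTWEIGHT;
```
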
After this query is executed, the `[db.]replicated_merge_tree_family_table_name` table fetches commands from the common replicated log into its own replication queue, and the query then waits until the replica has processed all of the fetched commands. The following modifiers are supported:
@@ -1157,7 +1157,7 @@ SELECT toModifiedJulianDayOrNull('2020-01-01');

## fromModifiedJulianDay {#frommodifiedjulianday}

Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in the text form `YYYY-MM-DD`. The function supports day numbers from `-678941` to `2973119` (representing 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in the text form `YYYY-MM-DD`. The function supports day numbers from `-678941` to `2973483` (representing 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.

**Syntax**
@@ -978,6 +978,7 @@ try
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
* At this moment, no one could own shared part of Context.
*/
global_context->resetSharedContext();
global_context.reset();
shared_context.reset();
LOG_DEBUG(log, "Destroyed global context.");
@@ -209,7 +209,7 @@ std::map<std::pair<TypeIndex, String>, NodeToSubcolumnTransformer> node_transfor
},
};

std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimization(const QueryTreeNodePtr & node)
std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimization(const QueryTreeNodePtr & node, const ContextPtr & context)
{
auto * function_node = node->as<FunctionNode>();
if (!function_node)
@@ -232,6 +232,12 @@ std::tuple<FunctionNode *, ColumnNode *, TableNode *> getTypedNodesForOptimizati
const auto & storage_snapshot = table_node->getStorageSnapshot();
auto column = first_argument_column_node->getColumn();

/// If view source is set we cannot optimize because it doesn't support moving functions to subcolumns.
/// The storage is replaced to the view source but it happens only after building a query tree and applying passes.
auto view_source = context->getViewSource();
if (view_source && view_source->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted())
return {};

if (!storage->supportsOptimizationToSubcolumns() || storage->isVirtualColumn(column.name, storage_snapshot->metadata))
return {};

@@ -266,7 +272,7 @@ public:
return;
}

auto [function_node, first_argument_node, table_node] = getTypedNodesForOptimization(node);
auto [function_node, first_argument_node, table_node] = getTypedNodesForOptimization(node, getContext());
if (function_node && first_argument_node && table_node)
{
enterImpl(*function_node, *first_argument_node, *table_node);
@@ -416,7 +422,7 @@ public:
if (!getSettings().optimize_functions_to_subcolumns)
return;

auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node);
auto [function_node, first_argument_column_node, table_node] = getTypedNodesForOptimization(node, getContext());
if (!function_node || !first_argument_column_node || !table_node)
return;
@@ -100,6 +100,7 @@ protected:
auto buf = BuilderRWBufferFromHTTP(getPingURI())
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);

return checkString(PING_OK_ANSWER, *buf);
@@ -206,6 +207,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);

bool res = false;
@@ -232,6 +234,7 @@ protected:
.withConnectionGroup(HTTPConnectionGroupType::STORAGE)
.withMethod(Poco::Net::HTTPRequest::HTTP_POST)
.withTimeouts(getHTTPTimeouts())
.withSettings(getContext()->getReadSettings())
.create(credentials);

std::string character;
@@ -111,6 +111,7 @@ add_headers_and_sources(dbms Storages/ObjectStorage)
add_headers_and_sources(dbms Storages/ObjectStorage/Azure)
add_headers_and_sources(dbms Storages/ObjectStorage/S3)
add_headers_and_sources(dbms Storages/ObjectStorage/HDFS)
add_headers_and_sources(dbms Storages/ObjectStorage/Local)
add_headers_and_sources(dbms Storages/ObjectStorage/DataLakes)
add_headers_and_sources(dbms Common/NamedCollections)
@@ -300,7 +300,7 @@ void ColumnDynamic::get(size_t n, Field & res) const
auto value_data = shared_variant.getDataAt(variant_col.offsetAt(n));
ReadBufferFromMemory buf(value_data.data, value_data.size);
auto type = decodeDataType(buf);
getVariantSerialization(type)->deserializeBinary(res, buf, getFormatSettings());
type->getDefaultSerialization()->deserializeBinary(res, buf, getFormatSettings());
}


@@ -736,8 +736,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, Arena & arena, const
{
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(discr);
encodeDataType(variant_type, buf);
getVariantSerialization(variant_type, variant_info.variant_names[discr])
->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings());
variant_type->getDefaultSerialization()->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings());
type_and_value = buf.str();
}

@@ -870,7 +869,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have both values serialized in binary format, so we need to
/// create temporary column, insert both values into it and compare.
auto tmp_column = left_data_type->createColumn();
const auto & serialization = getVariantSerialization(left_data_type, left_data_type_name);
const auto & serialization = left_data_type->getDefaultSerialization();
serialization->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
serialization->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
return tmp_column->compareAt(0, 1, *tmp_column, nan_direction_hint);
@@ -892,7 +891,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have left value serialized in binary format, we need to
/// create temporary column, insert the value into it and compare.
auto tmp_column = left_data_type->createColumn();
getVariantSerialization(left_data_type, left_data_type_name)->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
left_data_type->getDefaultSerialization()->deserializeBinary(*tmp_column, buf_left, getFormatSettings());
return tmp_column->compareAt(0, right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint);
}
/// Check if only right value is in shared data.
@@ -912,7 +911,7 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_
/// We have right value serialized in binary format, we need to
/// create temporary column, insert the value into it and compare.
auto tmp_column = right_data_type->createColumn();
getVariantSerialization(right_data_type, right_data_type_name)->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
right_data_type->getDefaultSerialization()->deserializeBinary(*tmp_column, buf_right, getFormatSettings());
return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), 0, *tmp_column, nan_direction_hint);
}
/// Otherwise both values are regular variants.
@@ -424,7 +424,7 @@ public:
/// Insert value into shared variant. Also updates Variant discriminators and offsets.
void insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n);

const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name) const
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name)
{
/// Get serialization for provided data type.
/// To avoid calling type->getDefaultSerialization() every time we use simple cache with max size.
@@ -438,7 +438,7 @@ public:
return serialization_cache.emplace(variant_name, variant_type->getDefaultSerialization()).first->second;
}

const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); }
const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) { return getVariantSerialization(variant_type, variant_type->getName()); }

private:
void createVariantInfo(const DataTypePtr & variant_type);
@@ -483,7 +483,7 @@ private:
/// We can use serializations of different data types to serialize values into shared variant.
/// To avoid creating the same serialization multiple times, use simple cache.
static const size_t SERIALIZATION_CACHE_MAX_SIZE = 256;
mutable std::unordered_map<String, SerializationPtr> serialization_cache;
std::unordered_map<String, SerializationPtr> serialization_cache;
};

void extendVariantColumn(
@@ -13,6 +13,7 @@
#include <IO/ReadHelpers.h>
#include <Interpreters/Context.h>
#include <Core/Settings.h>
#include <Poco/Environment.h>

#pragma clang diagnostic ignored "-Wreserved-identifier"

@@ -371,8 +372,8 @@ try
/// in case of double fault.

LOG_FATAL(log, "########## Short fault info ############");
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}) (from thread {}) Received signal {}",
VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH,
LOG_FATAL(log, "(version {}{}, build id: {}, git hash: {}, architecture: {}) (from thread {}) Received signal {}",
VERSION_STRING, VERSION_OFFICIAL, daemon ? daemon->build_id : "", GIT_HASH, Poco::Environment::osArchitecture(),
thread_num, sig);

std::string signal_description = "Unknown signal";
@@ -1120,7 +1120,7 @@ class IColumn;
M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \
M(String, schema_inference_hints, "", "The list of column names and types to use in schema inference for formats without column names. The format: 'column_name1 column_type1, column_name2 column_type2, ...'", 0) \
M(SchemaInferenceMode, schema_inference_mode, "default", "Mode of schema inference. 'default' - assume that all files have the same schema and schema can be inferred from any file, 'union' - files can have different schemas and the resulting schema should be a union of schemas of all files", 0) \
M(Bool, schema_inference_make_columns_nullable, true, "If set to true, all inferred types will be Nullable in schema inference for formats without information about nullability.", 0) \
M(UInt64Auto, schema_inference_make_columns_nullable, 1, "If set to true, all inferred types will be Nullable in schema inference. When set to false, no columns will be converted to Nullable. When set to 'auto', ClickHouse will use information about nullability from the data.", 0) \
M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
M(Bool, input_format_json_read_bools_as_strings, true, "Allow to parse bools as strings in JSON input formats", 0) \
M(Bool, input_format_json_try_infer_numbers_from_strings, false, "Try to infer numbers from string fields while schema inference", 0) \
@@ -22,7 +22,6 @@
#include <cstring>
#include <unistd.h>
#include <algorithm>
#include <typeinfo>
#include <iostream>
#include <memory>
@@ -185,7 +185,7 @@ std::unique_ptr<IDataType::SubstreamData> DataTypeDynamic::getDynamicSubcolumnDa
auto type = decodeDataType(buf);
if (type->getName() == subcolumn_type_name)
{
dynamic_column.getVariantSerialization(subcolumn_type, subcolumn_type_name)->deserializeBinary(*subcolumn, buf, format_settings);
subcolumn_type->getDefaultSerialization()->deserializeBinary(*subcolumn, buf, format_settings);
null_map.push_back(0);
}
else
@@ -1,10 +1,12 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypeObjectDeprecated.h>
#include <DataTypes/Serializations/SerializationJSON.h>
#include <DataTypes/Serializations/SerializationObjectTypedPath.h>
#include <DataTypes/Serializations/SerializationObjectDynamicPath.h>
#include <DataTypes/Serializations/SerializationSubObject.h>
#include <Columns/ColumnObject.h>
#include <Common/CurrentThread.h>

#include <Parsers/IAST.h>
#include <Parsers/ASTLiteral.h>
@@ -513,13 +515,24 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject::

static DataTypePtr createJSON(const ASTPtr & arguments)
{
auto context = CurrentThread::getQueryContext();
if (!context)
context = Context::getGlobalContextInstance();

if (context->getSettingsRef().use_json_alias_for_old_object_type)
{
if (arguments && !arguments->children.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Experimental Object type doesn't support any arguments. If you want to use new JSON type, set setting allow_experimental_json_type = 1");

return std::make_shared<DataTypeObjectDeprecated>("JSON", false);
}

return createObject(arguments, DataTypeObject::SchemaFormat::JSON);
}

void registerDataTypeJSON(DataTypeFactory & factory)
{
if (!Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type)
factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive);
factory.registerDataType("JSON", createJSON, DataTypeFactory::Case::Insensitive);
}

}
@@ -78,10 +78,6 @@ static DataTypePtr create(const ASTPtr & arguments)
void registerDataTypeObjectDeprecated(DataTypeFactory & factory)
{
factory.registerDataType("Object", create);
if (Context::getGlobalContextInstance()->getSettingsRef().use_json_alias_for_old_object_type)
factory.registerSimpleDataType("JSON",
[] { return std::make_shared<DataTypeObjectDeprecated>("JSON", false); },
DataTypeFactory::Case::Insensitive);
}

}
@@ -489,9 +489,8 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu
}

const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(global_discr);
const auto & variant_type_name = variant_info.variant_names[global_discr];
encodeDataType(variant_type, ostr);
dynamic_column.getVariantSerialization(variant_type, variant_type_name)->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
}

template <typename ReturnType = void, typename DeserializeFunc>
@@ -629,7 +628,7 @@ static void serializeTextImpl(
ReadBufferFromMemory buf(value.data, value.size);
auto variant_type = decodeDataType(buf);
auto tmp_variant_column = variant_type->createColumn();
auto variant_serialization = dynamic_column.getVariantSerialization(variant_type);
auto variant_serialization = variant_type->getDefaultSerialization();
variant_serialization->deserializeBinary(*tmp_variant_column, buf, settings);
nested_serialize(*variant_serialization, *tmp_variant_column, 0, ostr);
}
@@ -35,9 +35,10 @@ class RegionsNames
M(et, ru, 11) \
M(pt, en, 12) \
M(he, en, 13) \
M(vi, en, 14)
M(vi, en, 14) \
M(es, en, 15)

static constexpr size_t total_languages = 15;
static constexpr size_t total_languages = 16;

public:
enum class Language : size_t
@@ -43,39 +43,21 @@ bool LocalObjectStorage::exists(const StoredObject & object) const
std::unique_ptr<ReadBufferFromFileBase> LocalObjectStorage::readObjects( /// NOLINT
const StoredObjects & objects,
const ReadSettings & read_settings,
std::optional<size_t> read_hint,
std::optional<size_t> file_size) const
std::optional<size_t>,
std::optional<size_t>) const
{
auto modified_settings = patchSettings(read_settings);
auto global_context = Context::getGlobalContextInstance();
auto read_buffer_creator =
[=] (bool /* restricted_seek */, const StoredObject & object)
-> std::unique_ptr<ReadBufferFromFileBase>
{
return createReadBufferFromFileBase(object.remote_path, modified_settings, read_hint, file_size);
};
auto read_buffer_creator = [=](bool /* restricted_seek */, const StoredObject & object) -> std::unique_ptr<ReadBufferFromFileBase>
{ return std::make_unique<ReadBufferFromFile>(object.remote_path); };

switch (read_settings.remote_fs_method)
{
case RemoteFSReadMethod::read:
{
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */false);
}
case RemoteFSReadMethod::threadpool:
{
auto impl = std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator), objects, "file:", modified_settings,
global_context->getFilesystemCacheLog(), /* use_external_buffer */true);

auto & reader = global_context->getThreadPoolReader(FilesystemReaderType::ASYNCHRONOUS_REMOTE_FS_READER);
return std::make_unique<AsynchronousBoundedReadBuffer>(
std::move(impl), reader, read_settings,
global_context->getAsyncReadCounters(),
global_context->getFilesystemReadPrefetchesLog());
}
}
return std::make_unique<ReadBufferFromRemoteFSGather>(
std::move(read_buffer_creator),
objects,
"file:",
modified_settings,
global_context->getFilesystemCacheLog(),
/* use_external_buffer */ false);
}

ReadSettings LocalObjectStorage::patchSettings(const ReadSettings & read_settings) const
@@ -257,7 +257,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.max_bytes_to_read_for_schema_inference = settings.input_format_max_bytes_to_read_for_schema_inference;
format_settings.column_names_for_schema_inference = settings.column_names_for_schema_inference;
format_settings.schema_inference_hints = settings.schema_inference_hints;
format_settings.schema_inference_make_columns_nullable = settings.schema_inference_make_columns_nullable;
format_settings.schema_inference_make_columns_nullable = settings.schema_inference_make_columns_nullable.valueOr(2);
format_settings.mysql_dump.table_name = settings.input_format_mysql_dump_table_name;
format_settings.mysql_dump.map_column_names = settings.input_format_mysql_dump_map_column_names;
format_settings.sql_insert.max_batch_size = settings.output_format_sql_insert_max_batch_size;
@@ -77,7 +77,7 @@ struct FormatSettings
Raw
};

bool schema_inference_make_columns_nullable = true;
UInt64 schema_inference_make_columns_nullable = 1;

DateTimeOutputFormat date_time_output_format = DateTimeOutputFormat::Simple;
@@ -1179,6 +1179,12 @@ public:
const FormatSettings & format_settings,
String & error) const override
{
if (element.isNull() && format_settings.null_as_default)
{
column.insertDefault();
return true;
}

auto & tuple = assert_cast<ColumnTuple &>(column);
size_t old_size = column.size();
bool were_valid_elements = false;
@@ -1298,6 +1304,12 @@ public:
const FormatSettings & format_settings,
String & error) const override
{
if (element.isNull() && format_settings.null_as_default)
{
column.insertDefault();
return true;
}

if (!element.isObject())
{
error = fmt::format("cannot read Map value from JSON element: {}", jsonElementToString<JSONParser>(element, format_settings));
@@ -1362,6 +1374,14 @@ public:
String & error) const override
{
auto & column_variant = assert_cast<ColumnVariant &>(column);

/// Check if element is NULL.
if (element.isNull())
{
column_variant.insertDefault();
return true;
}

for (size_t i : order)
{
auto & variant = column_variant.getVariantByGlobalDiscriminator(i);
@@ -1344,7 +1344,11 @@ namespace
if (checkCharCaseInsensitive('n', buf))
{
if (checkStringCaseInsensitive("ull", buf))
return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
{
if (settings.schema_inference_make_columns_nullable == 0)
return std::make_shared<DataTypeNothing>();
return makeNullable(std::make_shared<DataTypeNothing>());
}
else if (checkStringCaseInsensitive("an", buf))
return std::make_shared<DataTypeFloat64>();
}
@@ -19,7 +19,9 @@
#include <Common/HashTable/Hash.h>

#if USE_SSL
# include <openssl/evp.h>
# include <openssl/md5.h>
# include <openssl/ripemd.h>
#endif

#include <bit>
@@ -196,6 +198,34 @@ T combineHashesFunc(T t1, T t2)
return HashFunction::apply(reinterpret_cast<const char *>(hashes), sizeof(hashes));
}

#if USE_SSL
struct RipeMD160Impl
{
static constexpr auto name = "ripeMD160";
using ReturnType = UInt256;

static UInt256 apply(const char * begin, size_t size)
{
UInt8 digest[RIPEMD160_DIGEST_LENGTH];

RIPEMD160(reinterpret_cast<const unsigned char *>(begin), size, reinterpret_cast<unsigned char *>(digest));

std::reverse(digest, digest + RIPEMD160_DIGEST_LENGTH);

UInt256 res = 0;
std::memcpy(&res, digest, RIPEMD160_DIGEST_LENGTH);

return res;
}

static UInt256 combineHashes(UInt256 h1, UInt256 h2)
{
return combineHashesFunc<UInt256, RipeMD160Impl>(h1, h2);
}

static constexpr bool use_int_hash_for_pods = false;
};
#endif

struct SipHash64Impl
{
@@ -1624,6 +1654,7 @@ using FunctionIntHash32 = FunctionIntHash<IntHash32Impl, NameIntHash32>;
using FunctionIntHash64 = FunctionIntHash<IntHash64Impl, NameIntHash64>;
#if USE_SSL
using FunctionHalfMD5 = FunctionAnyHash<HalfMD5Impl>;
using FunctionRipeMD160Hash = FunctionAnyHash<RipeMD160Impl>;
#endif
using FunctionSipHash128 = FunctionAnyHash<SipHash128Impl>;
using FunctionSipHash128Keyed = FunctionAnyHash<SipHash128KeyedImpl, true, SipHash128KeyedImpl::Key, SipHash128KeyedImpl::KeyColumns>;
@@ -1652,6 +1683,7 @@ using FunctionXxHash64 = FunctionAnyHash<ImplXxHash64>;
using FunctionXXH3 = FunctionAnyHash<ImplXXH3>;

using FunctionWyHash64 = FunctionAnyHash<ImplWyHash64>;

}

#pragma clang diagnostic pop
23
src/Functions/FunctionsHashingRipe.cpp
Normal file
23
src/Functions/FunctionsHashingRipe.cpp
Normal file
@ -0,0 +1,23 @@
#include "FunctionsHashing.h"

#include <Functions/FunctionFactory.h>

/// FunctionsHashing instantiations are separated into files FunctionsHashing*.cpp
/// to better parallelize the build procedure and avoid MSan build failure
/// due to excessive resource consumption.
namespace DB
{
#if USE_SSL
REGISTER_FUNCTION(HashingRipe)
{
factory.registerFunction<FunctionRipeMD160Hash>(FunctionDocumentation{
.description = "RIPEMD-160 hash function, primarily used in Bitcoin address generation.",
.examples{{"", "SELECT hex(ripeMD160('The quick brown fox jumps over the lazy dog'));", R"(
┌─hex(ripeMD160('The quick brown fox jumps over the lazy dog'))─┐
│ 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B │
└───────────────────────────────────────────────────────────────┘
)"}},
.categories{"Hash"}});
}
#endif
}
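A minimal standalone sketch of the byte packing done in RipeMD160Impl::apply above, assuming OpenSSL's legacy RIPEMD160() entry point is available (build with -lcrypto); illustrative only, not part of the commit:

#include <openssl/ripemd.h>
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    const char * msg = "The quick brown fox jumps over the lazy dog";
    unsigned char digest[RIPEMD160_DIGEST_LENGTH];
    RIPEMD160(reinterpret_cast<const unsigned char *>(msg), std::strlen(msg), digest);

    /// Reverse so the most significant digest byte lands in the high bytes of the
    /// integer, then copy into a zeroed 32-byte value (the upper 12 bytes stay zero),
    /// mirroring the UInt256 result of RipeMD160Impl::apply.
    std::reverse(digest, digest + RIPEMD160_DIGEST_LENGTH);
    std::array<uint8_t, 32> as_uint256{};
    std::memcpy(as_uint256.data(), digest, RIPEMD160_DIGEST_LENGTH);

    for (int i = RIPEMD160_DIGEST_LENGTH - 1; i >= 0; --i)
        std::printf("%02X", as_uint256[i]);
    std::printf("\n"); /// 37F332F68DB77BD9D7EDD4969571AD671CF9DD3B
    return 0;
}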
@ -284,12 +284,12 @@ void OrdinalDate::init(int64_t modified_julian_day)

bool OrdinalDate::tryInit(int64_t modified_julian_day)
{
/// This function supports day number from -678941 to 2973119 (which represent 0000-01-01 and 9999-12-31 respectively).
/// This function supports day number from -678941 to 2973483 (which represent 0000-01-01 and 9999-12-31 respectively).

if (modified_julian_day < -678941)
return false;

if (modified_julian_day > 2973119)
if (modified_julian_day > 2973483)
return false;

const auto a = modified_julian_day + 678575;
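The corrected upper bound can be sanity-checked standalone: with the usual days-from-civil-date formula and the fact that the Modified Julian Day of 1970-01-01 is 40587, 9999-12-31 maps to 2973483 and 0000-01-01 to -678941. A small sketch, not part of the commit:

#include <cstdint>
#include <cstdio>

/// Days since 1970-01-01 for a proleptic Gregorian date (Howard Hinnant's algorithm).
static int64_t days_from_civil(int64_t y, unsigned m, unsigned d)
{
    y -= m <= 2;
    const int64_t era = (y >= 0 ? y : y - 399) / 400;
    const unsigned yoe = static_cast<unsigned>(y - era * 400);            // [0, 399]
    const unsigned doy = (153 * (m + (m > 2 ? -3 : 9)) + 2) / 5 + d - 1;  // [0, 365]
    const unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy;           // [0, 146096]
    return era * 146097 + static_cast<int64_t>(doe) - 719468;
}

int main()
{
    const int64_t mjd_of_unix_epoch = 40587;
    std::printf("%lld\n", static_cast<long long>(days_from_civil(9999, 12, 31) + mjd_of_unix_epoch)); /// 2973483
    std::printf("%lld\n", static_cast<long long>(days_from_civil(0, 1, 1) + mjd_of_unix_epoch));      /// -678941
    return 0;
}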
@ -4,17 +4,21 @@
|
||||
|
||||
#if USE_ICU
|
||||
|
||||
#include <Columns/ColumnString.h>
|
||||
#include <Functions/LowerUpperImpl.h>
|
||||
#include <unicode/unistr.h>
|
||||
#include <Common/StringUtils.h>
|
||||
# include <Columns/ColumnString.h>
|
||||
# include <Functions/LowerUpperImpl.h>
|
||||
# include <unicode/ucasemap.h>
|
||||
# include <unicode/unistr.h>
|
||||
# include <unicode/urename.h>
|
||||
# include <unicode/utypes.h>
|
||||
# include <Common/StringUtils.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
template <char not_case_lower_bound, char not_case_upper_bound, bool upper>
|
||||
@ -27,7 +31,7 @@ struct LowerUpperUTF8Impl
|
||||
ColumnString::Offsets & res_offsets,
|
||||
size_t input_rows_count)
|
||||
{
|
||||
if (data.empty())
|
||||
if (input_rows_count == 0)
|
||||
return;
|
||||
|
||||
bool all_ascii = isAllASCII(data.data(), data.size());
|
||||
@ -38,39 +42,56 @@ struct LowerUpperUTF8Impl
|
||||
}
|
||||
|
||||
res_data.resize(data.size());
|
||||
res_offsets.resize_exact(offsets.size());
|
||||
res_offsets.resize_exact(input_rows_count);
|
||||
|
||||
UErrorCode error_code = U_ZERO_ERROR;
|
||||
UCaseMap * case_map = ucasemap_open("", U_FOLD_CASE_DEFAULT, &error_code);
|
||||
if (U_FAILURE(error_code))
|
||||
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Error calling ucasemap_open: {}", u_errorName(error_code));
|
||||
|
||||
String output;
|
||||
size_t curr_offset = 0;
|
||||
for (size_t i = 0; i < input_rows_count; ++i)
|
||||
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
|
||||
{
|
||||
const auto * data_start = reinterpret_cast<const char *>(&data[offsets[i - 1]]);
|
||||
size_t size = offsets[i] - offsets[i - 1];
|
||||
const auto * src = reinterpret_cast<const char *>(&data[offsets[row_i - 1]]);
|
||||
size_t src_size = offsets[row_i] - offsets[row_i - 1] - 1;
|
||||
|
||||
icu::UnicodeString input(data_start, static_cast<int32_t>(size), "UTF-8");
|
||||
int32_t dst_size;
|
||||
if constexpr (upper)
|
||||
input.toUpper();
|
||||
dst_size = ucasemap_utf8ToUpper(
|
||||
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
|
||||
else
|
||||
input.toLower();
|
||||
dst_size = ucasemap_utf8ToLower(
|
||||
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
|
||||
|
||||
output.clear();
|
||||
input.toUTF8String(output);
|
||||
if (error_code == U_BUFFER_OVERFLOW_ERROR || error_code == U_STRING_NOT_TERMINATED_WARNING)
|
||||
{
|
||||
size_t new_size = curr_offset + dst_size + 1;
|
||||
res_data.resize(new_size);
|
||||
|
||||
/// For valid UTF-8 input strings, ICU sometimes produces output with an extra '\0' at the end. Only the data before that
/// '\0' is valid. If the input is not valid UTF-8, then the behavior of lower/upperUTF8 is undefined by definition. In this
/// case, the behavior is also reasonable.
|
||||
size_t valid_size = output.size();
|
||||
if (!output.empty() && output.back() == '\0')
|
||||
--valid_size;
|
||||
error_code = U_ZERO_ERROR;
|
||||
if constexpr (upper)
|
||||
dst_size = ucasemap_utf8ToUpper(
|
||||
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
|
||||
else
|
||||
dst_size = ucasemap_utf8ToLower(
|
||||
case_map, reinterpret_cast<char *>(&res_data[curr_offset]), res_data.size() - curr_offset, src, src_size, &error_code);
|
||||
}
|
||||
|
||||
res_data.resize(curr_offset + valid_size + 1);
|
||||
if (error_code != U_ZERO_ERROR)
|
||||
throw DB::Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Error calling {}: {} input: {} input_size: {}",
|
||||
upper ? "ucasemap_utf8ToUpper" : "ucasemap_utf8ToLower",
|
||||
u_errorName(error_code),
|
||||
std::string_view(src, src_size),
|
||||
src_size);
|
||||
|
||||
memcpy(&res_data[curr_offset], output.data(), valid_size);
|
||||
res_data[curr_offset + valid_size] = 0;
|
||||
|
||||
curr_offset += valid_size + 1;
|
||||
res_offsets[i] = curr_offset;
|
||||
res_data[curr_offset + dst_size] = 0;
|
||||
curr_offset += dst_size + 1;
|
||||
res_offsets[row_i] = curr_offset;
|
||||
}
|
||||
|
||||
res_data.resize(curr_offset);
|
||||
}
|
||||
|
||||
static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &, size_t)
|
||||
|
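The ucasemap-based conversion above follows ICU's usual two-call pattern: try with the current buffer, and if ICU reports a buffer overflow (or the not-terminated warning) resize to the reported size and call again. A standalone sketch of that pattern on a single std::string, assuming ICU is available (link with -licuuc); not part of the commit:

#include <unicode/ucasemap.h>
#include <cstdio>
#include <stdexcept>
#include <string>

static std::string utf8ToLower(const std::string & src)
{
    UErrorCode status = U_ZERO_ERROR;
    UCaseMap * case_map = ucasemap_open("", 0 /* default options */, &status);
    if (U_FAILURE(status))
        throw std::runtime_error(u_errorName(status));

    std::string dst(src.size(), '\0'); /// first guess: same size as the input
    int32_t size = ucasemap_utf8ToLower(case_map, dst.data(), static_cast<int32_t>(dst.size()),
                                        src.data(), static_cast<int32_t>(src.size()), &status);
    if (status == U_BUFFER_OVERFLOW_ERROR || status == U_STRING_NOT_TERMINATED_WARNING)
    {
        dst.resize(size);              /// ICU reported the required size; retry once
        status = U_ZERO_ERROR;
        size = ucasemap_utf8ToLower(case_map, dst.data(), static_cast<int32_t>(dst.size()),
                                    src.data(), static_cast<int32_t>(src.size()), &status);
    }
    ucasemap_close(case_map);
    if (U_FAILURE(status))
        throw std::runtime_error(u_errorName(status));
    dst.resize(size);
    return dst;
}

int main()
{
    std::printf("%s\n", utf8ToLower("ÇlickHouse İstanbul").c_str()); /// grows: İ lowercases to i plus a combining dot
    return 0;
}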
@ -443,6 +443,7 @@ std::unique_ptr<ReadBuffer> ReadWriteBufferFromHTTP::initialize()
|
||||
}
|
||||
|
||||
response.getCookies(cookies);
|
||||
response.getHeaders(response_headers);
|
||||
content_encoding = response.get("Content-Encoding", "");
|
||||
|
||||
// Remember file size. It'll be used to report eof in next nextImpl() call.
|
||||
@ -680,6 +681,19 @@ std::string ReadWriteBufferFromHTTP::getResponseCookie(const std::string & name,
|
||||
return def;
|
||||
}
|
||||
|
||||
Map ReadWriteBufferFromHTTP::getResponseHeaders() const
|
||||
{
|
||||
Map map;
|
||||
for (const auto & header : response_headers)
|
||||
{
|
||||
Tuple elem;
|
||||
elem.emplace_back(header.first);
|
||||
elem.emplace_back(header.second);
|
||||
map.emplace_back(elem);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
void ReadWriteBufferFromHTTP::setNextCallback(NextCallback next_callback_)
|
||||
{
|
||||
next_callback = next_callback_;
|
||||
|
@ -90,6 +90,9 @@ private:
|
||||
std::unique_ptr<ReadBuffer> impl;
|
||||
|
||||
std::vector<Poco::Net::HTTPCookie> cookies;
|
||||
|
||||
std::map<String, String> response_headers;
|
||||
|
||||
HTTPHeaderEntries http_header_entries;
|
||||
std::function<void(size_t)> next_callback;
|
||||
|
||||
@ -187,6 +190,8 @@ public:
|
||||
|
||||
HTTPFileInfo getFileInfo();
|
||||
static HTTPFileInfo parseFileInfo(const Poco::Net::HTTPResponse & response, size_t requested_range_begin);
|
||||
|
||||
Map getResponseHeaders() const;
|
||||
};
|
||||
|
||||
using ReadWriteBufferFromHTTPPtr = std::unique_ptr<ReadWriteBufferFromHTTP>;
|
||||
|
@ -59,6 +59,18 @@ class CompiledAggregateFunctionsHolder;
|
||||
class NativeWriter;
|
||||
struct OutputBlockColumns;
|
||||
|
||||
struct GroupingSetsParams
|
||||
{
|
||||
GroupingSetsParams() = default;
|
||||
|
||||
GroupingSetsParams(Names used_keys_, Names missing_keys_) : used_keys(std::move(used_keys_)), missing_keys(std::move(missing_keys_)) { }
|
||||
|
||||
Names used_keys;
|
||||
Names missing_keys;
|
||||
};
|
||||
|
||||
using GroupingSetsParamsList = std::vector<GroupingSetsParams>;
|
||||
|
||||
/** How are "total" values calculated with WITH TOTALS?
|
||||
* (For more details, see TotalsHavingTransform.)
|
||||
*
|
||||
|
@ -389,6 +389,10 @@ AsynchronousInsertQueue::pushDataChunk(ASTPtr query, DataChunk chunk, ContextPtr
|
||||
if (data_kind == DataKind::Preprocessed)
|
||||
insert_query.format = "Native";
|
||||
|
||||
/// Query parameters make sense only for format Values.
|
||||
if (insert_query.format == "Values")
|
||||
entry->query_parameters = query_context->getQueryParameters();
|
||||
|
||||
InsertQuery key{query, query_context->getUserID(), query_context->getCurrentRoles(), settings, data_kind};
|
||||
InsertDataPtr data_to_process;
|
||||
std::future<void> insert_future;
|
||||
@ -998,6 +1002,7 @@ Chunk AsynchronousInsertQueue::processEntriesWithParsing(
|
||||
"Expected entry with data kind Parsed. Got: {}", entry->chunk.getDataKind());
|
||||
|
||||
auto buffer = std::make_unique<ReadBufferFromString>(*bytes);
|
||||
executor.setQueryParameters(entry->query_parameters);
|
||||
|
||||
size_t num_bytes = bytes->size();
|
||||
size_t num_rows = executor.execute(*buffer);
|
||||
|
@ -147,6 +147,7 @@ private:
|
||||
const String format;
|
||||
MemoryTracker * const user_memory_tracker;
|
||||
const std::chrono::time_point<std::chrono::system_clock> create_time;
|
||||
NameToNameMap query_parameters;
|
||||
|
||||
Entry(
|
||||
DataChunk && chunk_,
|
||||
|
@ -893,6 +893,12 @@ ContextData::ContextData(const ContextData &o) :
|
||||
{
|
||||
}
|
||||
|
||||
void ContextData::resetSharedContext()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex_shared_context);
|
||||
shared = nullptr;
|
||||
}
|
||||
|
||||
Context::Context() = default;
|
||||
Context::Context(const Context & rhs) : ContextData(rhs), std::enable_shared_from_this<Context>(rhs) {}
|
||||
|
||||
@ -914,14 +920,6 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
|
||||
return res;
|
||||
}
|
||||
|
||||
void Context::initGlobal()
|
||||
{
|
||||
assert(!global_context_instance);
|
||||
global_context_instance = shared_from_this();
|
||||
DatabaseCatalog::init(shared_from_this());
|
||||
EventNotifier::init();
|
||||
}
|
||||
|
||||
SharedContextHolder Context::createShared()
|
||||
{
|
||||
return SharedContextHolder(std::make_unique<ContextSharedPart>());
|
||||
@ -2692,7 +2690,11 @@ void Context::makeSessionContext()
|
||||
|
||||
void Context::makeGlobalContext()
|
||||
{
|
||||
initGlobal();
|
||||
assert(!global_context_instance);
|
||||
global_context_instance = shared_from_this();
|
||||
DatabaseCatalog::init(shared_from_this());
|
||||
EventNotifier::init();
|
||||
|
||||
global_context = shared_from_this();
|
||||
}
|
||||
|
||||
@ -4088,8 +4090,13 @@ void Context::initializeTraceCollector()
|
||||
}
|
||||
|
||||
/// Call after unexpected crash happen.
|
||||
void Context::handleCrash() const TSA_NO_THREAD_SAFETY_ANALYSIS
|
||||
void Context::handleCrash() const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex_shared_context);
|
||||
if (!shared)
|
||||
return;
|
||||
|
||||
SharedLockGuard lock2(shared->mutex);
|
||||
if (shared->system_logs)
|
||||
shared->system_logs->handleCrash();
|
||||
}
|
||||
|
@ -492,6 +492,8 @@ public:
|
||||
|
||||
KitchenSink kitchen_sink;
|
||||
|
||||
void resetSharedContext();
|
||||
|
||||
protected:
|
||||
using SampleBlockCache = std::unordered_map<std::string, Block>;
|
||||
mutable SampleBlockCache sample_block_cache;
|
||||
@ -529,6 +531,10 @@ protected:
|
||||
mutable ThrottlerPtr local_write_query_throttler; /// A query-wide throttler for local IO writes
|
||||
|
||||
mutable ThrottlerPtr backups_query_throttler; /// A query-wide throttler for BACKUPs
|
||||
|
||||
mutable std::mutex mutex_shared_context; /// mutex to avoid accessing destroyed shared context pointer
|
||||
/// some Context methods can be called after the shared context is destroyed
|
||||
/// example, Context::handleCrash() method - called from signal handler
|
||||
};
|
||||
|
||||
/** A set of known objects that can be used in the query.
|
||||
@ -1387,8 +1393,6 @@ private:
|
||||
|
||||
ExternalUserDefinedExecutableFunctionsLoader & getExternalUserDefinedExecutableFunctionsLoaderWithLock(const std::lock_guard<std::mutex> & lock);
|
||||
|
||||
void initGlobal();
|
||||
|
||||
void setUserID(const UUID & user_id_);
|
||||
void setCurrentRolesImpl(const std::vector<UUID> & new_current_roles, bool throw_if_not_granted, bool skip_if_not_granted, const std::shared_ptr<const User> & user);
|
||||
|
||||
|
@ -347,6 +347,27 @@ bool shouldIgnoreQuotaAndLimits(const StorageID & table_id)
|
||||
return false;
|
||||
}
|
||||
|
||||
GroupingSetsParamsList getAggregatorGroupingSetsParams(const NamesAndTypesLists & aggregation_keys_list, const Names & all_keys)
|
||||
{
|
||||
GroupingSetsParamsList result;
|
||||
|
||||
for (const auto & aggregation_keys : aggregation_keys_list)
|
||||
{
|
||||
NameSet keys;
|
||||
for (const auto & key : aggregation_keys)
|
||||
keys.insert(key.name);
|
||||
|
||||
Names missing_keys;
|
||||
for (const auto & key : all_keys)
|
||||
if (!keys.contains(key))
|
||||
missing_keys.push_back(key);
|
||||
|
||||
result.emplace_back(aggregation_keys.getNames(), std::move(missing_keys));
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
InterpreterSelectQuery::InterpreterSelectQuery(
|
||||
@ -2005,13 +2026,12 @@ static void executeMergeAggregatedImpl(
|
||||
bool has_grouping_sets,
|
||||
const Settings & settings,
|
||||
const NamesAndTypesList & aggregation_keys,
|
||||
const NamesAndTypesLists & aggregation_keys_list,
|
||||
const AggregateDescriptions & aggregates,
|
||||
bool should_produce_results_in_order_of_bucket_number,
|
||||
SortDescription group_by_sort_description)
|
||||
{
|
||||
auto keys = aggregation_keys.getNames();
|
||||
if (has_grouping_sets)
|
||||
keys.insert(keys.begin(), "__grouping_set");
|
||||
|
||||
/** There are two modes of distributed aggregation.
|
||||
*
|
||||
@ -2029,10 +2049,12 @@ static void executeMergeAggregatedImpl(
|
||||
*/
|
||||
|
||||
Aggregator::Params params(keys, aggregates, overflow_row, settings.max_threads, settings.max_block_size, settings.min_hit_rate_to_use_consecutive_keys_optimization);
|
||||
auto grouping_sets_params = getAggregatorGroupingSetsParams(aggregation_keys_list, keys);
|
||||
|
||||
auto merging_aggregated = std::make_unique<MergingAggregatedStep>(
|
||||
query_plan.getCurrentDataStream(),
|
||||
params,
|
||||
grouping_sets_params,
|
||||
final,
|
||||
/// Grouping sets don't work with distributed_aggregation_memory_efficient enabled (#43989)
|
||||
settings.distributed_aggregation_memory_efficient && is_remote_storage && !has_grouping_sets,
|
||||
@ -2653,30 +2675,6 @@ static Aggregator::Params getAggregatorParams(
|
||||
};
|
||||
}
|
||||
|
||||
static GroupingSetsParamsList getAggregatorGroupingSetsParams(const SelectQueryExpressionAnalyzer & query_analyzer, const Names & all_keys)
|
||||
{
|
||||
GroupingSetsParamsList result;
|
||||
if (query_analyzer.useGroupingSetKey())
|
||||
{
|
||||
auto const & aggregation_keys_list = query_analyzer.aggregationKeysList();
|
||||
|
||||
for (const auto & aggregation_keys : aggregation_keys_list)
|
||||
{
|
||||
NameSet keys;
|
||||
for (const auto & key : aggregation_keys)
|
||||
keys.insert(key.name);
|
||||
|
||||
Names missing_keys;
|
||||
for (const auto & key : all_keys)
|
||||
if (!keys.contains(key))
|
||||
missing_keys.push_back(key);
|
||||
|
||||
result.emplace_back(aggregation_keys.getNames(), std::move(missing_keys));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const ActionsAndProjectInputsFlagPtr & expression, bool overflow_row, bool final, InputOrderInfoPtr group_by_info)
|
||||
{
|
||||
executeExpression(query_plan, expression, "Before GROUP BY");
|
||||
@ -2696,7 +2694,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
|
||||
settings.group_by_two_level_threshold,
|
||||
settings.group_by_two_level_threshold_bytes);
|
||||
|
||||
auto grouping_sets_params = getAggregatorGroupingSetsParams(*query_analyzer, keys);
|
||||
auto grouping_sets_params = getAggregatorGroupingSetsParams(query_analyzer->aggregationKeysList(), keys);
|
||||
|
||||
SortDescription group_by_sort_description;
|
||||
SortDescription sort_description_for_merging;
|
||||
@ -2764,6 +2762,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool
|
||||
has_grouping_sets,
|
||||
context->getSettingsRef(),
|
||||
query_analyzer->aggregationKeys(),
|
||||
query_analyzer->aggregationKeysList(),
|
||||
query_analyzer->aggregates(),
|
||||
should_produce_results_in_order_of_bucket_number,
|
||||
std::move(group_by_sort_description));
|
||||
|
@ -504,8 +504,6 @@ void addMergingAggregatedStep(QueryPlan & query_plan,
|
||||
*/
|
||||
|
||||
auto keys = aggregation_analysis_result.aggregation_keys;
|
||||
if (!aggregation_analysis_result.grouping_sets_parameters_list.empty())
|
||||
keys.insert(keys.begin(), "__grouping_set");
|
||||
|
||||
Aggregator::Params params(keys,
|
||||
aggregation_analysis_result.aggregate_descriptions,
|
||||
@ -530,6 +528,7 @@ void addMergingAggregatedStep(QueryPlan & query_plan,
|
||||
auto merging_aggregated = std::make_unique<MergingAggregatedStep>(
|
||||
query_plan.getCurrentDataStream(),
|
||||
params,
|
||||
aggregation_analysis_result.grouping_sets_parameters_list,
|
||||
query_analysis_result.aggregate_final,
|
||||
/// Grouping sets don't work with distributed_aggregation_memory_efficient enabled (#43989)
|
||||
settings.distributed_aggregation_memory_efficient && (is_remote_storage || parallel_replicas_from_merge_tree) && !query_analysis_result.aggregation_with_rollup_or_cube_or_grouping_sets,
|
||||
|
@ -1,5 +1,6 @@
|
||||
#include <Processors/Executors/StreamingFormatExecutor.h>
|
||||
#include <Processors/Transforms/AddingDefaultsTransform.h>
|
||||
#include <Processors/Formats/Impl/ValuesBlockInputFormat.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -33,6 +34,13 @@ MutableColumns StreamingFormatExecutor::getResultColumns()
|
||||
return ret_columns;
|
||||
}
|
||||
|
||||
void StreamingFormatExecutor::setQueryParameters(const NameToNameMap & parameters)
|
||||
{
|
||||
/// Query parameters make sense only for format Values.
|
||||
if (auto * values_format = typeid_cast<ValuesBlockInputFormat *>(format.get()))
|
||||
values_format->setQueryParameters(parameters);
|
||||
}
|
||||
|
||||
size_t StreamingFormatExecutor::execute(ReadBuffer & buffer)
|
||||
{
|
||||
format->setReadBuffer(buffer);
|
||||
|
@ -39,6 +39,9 @@ public:
|
||||
/// Releases currently accumulated columns.
|
||||
MutableColumns getResultColumns();
|
||||
|
||||
/// Sets query parameters for input format if applicable.
|
||||
void setQueryParameters(const NameToNameMap & parameters);
|
||||
|
||||
private:
|
||||
void setCheckpoints();
|
||||
|
||||
|
@ -54,13 +54,8 @@ void checkFinalInferredType(
type = default_type;
}

if (settings.schema_inference_make_columns_nullable)
if (settings.schema_inference_make_columns_nullable == 1)
type = makeNullableRecursively(type);
/// In case when data for some column could contain nulls and regular values,
/// resulting inferred type is Nullable.
/// If input_format_null_as_default is enabled, we should remove Nullable type.
else if (settings.null_as_default)
type = removeNullable(type);
}

void ISchemaReader::transformTypesIfNeeded(DB::DataTypePtr & type, DB::DataTypePtr & new_type)
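Taken together, the checks in this and the following reader hunks imply a three-way setting: 0 never adds Nullable, 1 wraps every inferred type recursively, and any other value keeps Nullable only where the data actually contained NULLs, with input_format_null_as_default stripping it again. A toy sketch of that final step, using strings in place of DataTypePtr (names here are illustrative assumptions, not the ClickHouse API):

#include <cstdint>
#include <cstdio>
#include <string>

static std::string wrapNullable(const std::string & type)
{
    return type.rfind("Nullable(", 0) == 0 ? type : "Nullable(" + type + ")";
}

static std::string stripNullable(const std::string & type)
{
    if (type.rfind("Nullable(", 0) == 0)
        return type.substr(9, type.size() - 10);
    return type;
}

static std::string finalizeInferredType(std::string type, uint8_t make_columns_nullable, bool null_as_default)
{
    if (make_columns_nullable == 1)
        return wrapNullable(type);      /// force Nullable on every column
    if (null_as_default)
        return stripNullable(type);     /// NULLs are read as column defaults instead
    return type;                        /// keep whatever the data implied
}

int main()
{
    std::printf("%s\n", finalizeInferredType("Nullable(Int64)", 0, true).c_str());  /// Int64
    std::printf("%s\n", finalizeInferredType("Int64", 1, false).c_str());           /// Nullable(Int64)
    std::printf("%s\n", finalizeInferredType("Nullable(Int64)", 2, false).c_str()); /// Nullable(Int64)
    return 0;
}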
@ -204,8 +204,11 @@ NamesAndTypesList ArrowSchemaReader::readSchema()
|
||||
schema = file_reader->schema();
|
||||
|
||||
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
|
||||
*schema, stream ? "ArrowStream" : "Arrow", format_settings.arrow.skip_columns_with_unsupported_types_in_schema_inference);
|
||||
if (format_settings.schema_inference_make_columns_nullable)
|
||||
*schema,
|
||||
stream ? "ArrowStream" : "Arrow",
|
||||
format_settings.arrow.skip_columns_with_unsupported_types_in_schema_inference,
|
||||
format_settings.schema_inference_make_columns_nullable != 0);
|
||||
if (format_settings.schema_inference_make_columns_nullable == 1)
|
||||
return getNamesAndRecursivelyNullableTypes(header);
|
||||
return header.getNamesAndTypesList();
|
||||
}
|
||||
|
@ -727,6 +727,7 @@ struct ReadColumnFromArrowColumnSettings
|
||||
FormatSettings::DateTimeOverflowBehavior date_time_overflow_behavior;
|
||||
bool allow_arrow_null_type;
|
||||
bool skip_columns_with_unsupported_types;
|
||||
bool allow_inferring_nullable_columns;
|
||||
};
|
||||
|
||||
static ColumnWithTypeAndName readColumnFromArrowColumn(
|
||||
@ -1109,7 +1110,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn(
|
||||
bool is_map_nested_column,
|
||||
const ReadColumnFromArrowColumnSettings & settings)
|
||||
{
|
||||
bool read_as_nullable_column = arrow_column->null_count() || is_nullable_column || (type_hint && type_hint->isNullable());
|
||||
bool read_as_nullable_column = (arrow_column->null_count() || is_nullable_column || (type_hint && type_hint->isNullable())) && settings.allow_inferring_nullable_columns;
|
||||
if (read_as_nullable_column &&
|
||||
arrow_column->type()->id() != arrow::Type::LIST &&
|
||||
arrow_column->type()->id() != arrow::Type::LARGE_LIST &&
|
||||
@ -1173,14 +1174,16 @@ static std::shared_ptr<arrow::ChunkedArray> createArrowColumn(const std::shared_
|
||||
Block ArrowColumnToCHColumn::arrowSchemaToCHHeader(
|
||||
const arrow::Schema & schema,
|
||||
const std::string & format_name,
|
||||
bool skip_columns_with_unsupported_types)
|
||||
bool skip_columns_with_unsupported_types,
|
||||
bool allow_inferring_nullable_columns)
|
||||
{
|
||||
ReadColumnFromArrowColumnSettings settings
|
||||
{
|
||||
.format_name = format_name,
|
||||
.date_time_overflow_behavior = FormatSettings::DateTimeOverflowBehavior::Ignore,
|
||||
.allow_arrow_null_type = false,
|
||||
.skip_columns_with_unsupported_types = skip_columns_with_unsupported_types
|
||||
.skip_columns_with_unsupported_types = skip_columns_with_unsupported_types,
|
||||
.allow_inferring_nullable_columns = allow_inferring_nullable_columns,
|
||||
};
|
||||
|
||||
ColumnsWithTypeAndName sample_columns;
|
||||
@ -1254,7 +1257,8 @@ Chunk ArrowColumnToCHColumn::arrowColumnsToCHChunk(const NameToArrowColumn & nam
|
||||
.format_name = format_name,
|
||||
.date_time_overflow_behavior = date_time_overflow_behavior,
|
||||
.allow_arrow_null_type = true,
|
||||
.skip_columns_with_unsupported_types = false
|
||||
.skip_columns_with_unsupported_types = false,
|
||||
.allow_inferring_nullable_columns = true
|
||||
};
|
||||
|
||||
Columns columns;
|
||||
|
@ -34,7 +34,8 @@ public:
|
||||
static Block arrowSchemaToCHHeader(
|
||||
const arrow::Schema & schema,
|
||||
const std::string & format_name,
|
||||
bool skip_columns_with_unsupported_types = false);
|
||||
bool skip_columns_with_unsupported_types = false,
|
||||
bool allow_inferring_nullable_columns = true);
|
||||
|
||||
struct DictionaryInfo
|
||||
{
|
||||
|
@ -1002,7 +1002,7 @@ NamesAndTypesList NativeORCSchemaReader::readSchema()
|
||||
header.insert(ColumnWithTypeAndName{type, name});
|
||||
}
|
||||
|
||||
if (format_settings.schema_inference_make_columns_nullable)
|
||||
if (format_settings.schema_inference_make_columns_nullable == 1)
|
||||
return getNamesAndRecursivelyNullableTypes(header);
|
||||
return header.getNamesAndTypesList();
|
||||
}
|
||||
|
@ -160,8 +160,11 @@ NamesAndTypesList ORCSchemaReader::readSchema()
|
||||
{
|
||||
initializeIfNeeded();
|
||||
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
|
||||
*schema, "ORC", format_settings.orc.skip_columns_with_unsupported_types_in_schema_inference);
|
||||
if (format_settings.schema_inference_make_columns_nullable)
|
||||
*schema,
|
||||
"ORC",
|
||||
format_settings.orc.skip_columns_with_unsupported_types_in_schema_inference,
|
||||
format_settings.schema_inference_make_columns_nullable != 0);
|
||||
if (format_settings.schema_inference_make_columns_nullable == 1)
|
||||
return getNamesAndRecursivelyNullableTypes(header);
|
||||
return header.getNamesAndTypesList();
|
||||
}
|
||||
|
@ -869,8 +869,11 @@ NamesAndTypesList ParquetSchemaReader::readSchema()
|
||||
THROW_ARROW_NOT_OK(parquet::arrow::FromParquetSchema(metadata->schema(), &schema));
|
||||
|
||||
auto header = ArrowColumnToCHColumn::arrowSchemaToCHHeader(
|
||||
*schema, "Parquet", format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference);
|
||||
if (format_settings.schema_inference_make_columns_nullable)
|
||||
*schema,
|
||||
"Parquet",
|
||||
format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference,
|
||||
format_settings.schema_inference_make_columns_nullable != 0);
|
||||
if (format_settings.schema_inference_make_columns_nullable == 1)
|
||||
return getNamesAndRecursivelyNullableTypes(header);
|
||||
return header.getNamesAndTypesList();
|
||||
}
|
||||
|
@ -663,6 +663,16 @@ void ValuesBlockInputFormat::resetReadBuffer()
|
||||
IInputFormat::resetReadBuffer();
|
||||
}
|
||||
|
||||
void ValuesBlockInputFormat::setQueryParameters(const NameToNameMap & parameters)
|
||||
{
|
||||
if (parameters == context->getQueryParameters())
|
||||
return;
|
||||
|
||||
auto context_copy = Context::createCopy(context);
|
||||
context_copy->setQueryParameters(parameters);
|
||||
context = std::move(context_copy);
|
||||
}
|
||||
|
||||
ValuesSchemaReader::ValuesSchemaReader(ReadBuffer & in_, const FormatSettings & format_settings_)
|
||||
: IRowSchemaReader(buf, format_settings_), buf(in_)
|
||||
{
|
||||
|
@ -38,6 +38,7 @@ public:
|
||||
|
||||
/// TODO: remove context somehow.
|
||||
void setContext(const ContextPtr & context_) { context = Context::createCopy(context_); }
|
||||
void setQueryParameters(const NameToNameMap & parameters);
|
||||
|
||||
const BlockMissingValues & getMissingValues() const override { return block_missing_values; }
|
||||
|
||||
|
@ -151,6 +151,61 @@ void AggregatingStep::applyOrder(SortDescription sort_description_for_merging_,
|
||||
explicit_sorting_required_for_aggregation_in_order = false;
|
||||
}
|
||||
|
||||
ActionsDAG AggregatingStep::makeCreatingMissingKeysForGroupingSetDAG(
|
||||
const Block & in_header,
|
||||
const Block & out_header,
|
||||
const GroupingSetsParamsList & grouping_sets_params,
|
||||
UInt64 group,
|
||||
bool group_by_use_nulls)
|
||||
{
|
||||
/// Here we create a DAG which fills missing keys and adds `__grouping_set` column
|
||||
ActionsDAG dag(in_header.getColumnsWithTypeAndName());
|
||||
ActionsDAG::NodeRawConstPtrs outputs;
|
||||
outputs.reserve(out_header.columns() + 1);
|
||||
|
||||
auto grouping_col = ColumnConst::create(ColumnUInt64::create(1, group), 0);
|
||||
const auto * grouping_node = &dag.addColumn(
|
||||
{ColumnPtr(std::move(grouping_col)), std::make_shared<DataTypeUInt64>(), "__grouping_set"});
|
||||
|
||||
grouping_node = &dag.materializeNode(*grouping_node);
|
||||
outputs.push_back(grouping_node);
|
||||
|
||||
const auto & missing_columns = grouping_sets_params[group].missing_keys;
|
||||
const auto & used_keys = grouping_sets_params[group].used_keys;
|
||||
|
||||
auto to_nullable_function = FunctionFactory::instance().get("toNullable", nullptr);
|
||||
for (size_t i = 0; i < out_header.columns(); ++i)
|
||||
{
|
||||
const auto & col = out_header.getByPosition(i);
|
||||
const auto missing_it = std::find_if(
|
||||
missing_columns.begin(), missing_columns.end(), [&](const auto & missing_col) { return missing_col == col.name; });
|
||||
const auto used_it = std::find_if(
|
||||
used_keys.begin(), used_keys.end(), [&](const auto & used_col) { return used_col == col.name; });
|
||||
if (missing_it != missing_columns.end())
|
||||
{
|
||||
auto column_with_default = col.column->cloneEmpty();
|
||||
col.type->insertDefaultInto(*column_with_default);
|
||||
column_with_default->finalize();
|
||||
|
||||
auto column = ColumnConst::create(std::move(column_with_default), 0);
|
||||
const auto * node = &dag.addColumn({ColumnPtr(std::move(column)), col.type, col.name});
|
||||
node = &dag.materializeNode(*node);
|
||||
outputs.push_back(node);
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto * column_node = dag.getOutputs()[in_header.getPositionByName(col.name)];
|
||||
if (used_it != used_keys.end() && group_by_use_nulls && column_node->result_type->canBeInsideNullable())
|
||||
outputs.push_back(&dag.addFunction(to_nullable_function, { column_node }, col.name));
|
||||
else
|
||||
outputs.push_back(column_node);
|
||||
}
|
||||
}
|
||||
|
||||
dag.getOutputs().swap(outputs);
|
||||
return dag;
|
||||
}
|
||||
|
||||
void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings)
|
||||
{
|
||||
QueryPipelineProcessorsCollector collector(pipeline, this);
|
||||
@ -300,51 +355,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
|
||||
{
|
||||
const auto & header = ports[set_counter]->getHeader();
|
||||
|
||||
/// Here we create a DAG which fills missing keys and adds `__grouping_set` column
|
||||
ActionsDAG dag(header.getColumnsWithTypeAndName());
|
||||
ActionsDAG::NodeRawConstPtrs outputs;
|
||||
outputs.reserve(output_header.columns() + 1);
|
||||
|
||||
auto grouping_col = ColumnConst::create(ColumnUInt64::create(1, set_counter), 0);
|
||||
const auto * grouping_node = &dag.addColumn(
|
||||
{ColumnPtr(std::move(grouping_col)), std::make_shared<DataTypeUInt64>(), "__grouping_set"});
|
||||
|
||||
grouping_node = &dag.materializeNode(*grouping_node);
|
||||
outputs.push_back(grouping_node);
|
||||
|
||||
const auto & missing_columns = grouping_sets_params[set_counter].missing_keys;
|
||||
const auto & used_keys = grouping_sets_params[set_counter].used_keys;
|
||||
|
||||
auto to_nullable_function = FunctionFactory::instance().get("toNullable", nullptr);
|
||||
for (size_t i = 0; i < output_header.columns(); ++i)
|
||||
{
|
||||
auto & col = output_header.getByPosition(i);
|
||||
const auto missing_it = std::find_if(
|
||||
missing_columns.begin(), missing_columns.end(), [&](const auto & missing_col) { return missing_col == col.name; });
|
||||
const auto used_it = std::find_if(
|
||||
used_keys.begin(), used_keys.end(), [&](const auto & used_col) { return used_col == col.name; });
|
||||
if (missing_it != missing_columns.end())
|
||||
{
|
||||
auto column_with_default = col.column->cloneEmpty();
|
||||
col.type->insertDefaultInto(*column_with_default);
|
||||
column_with_default->finalize();
|
||||
|
||||
auto column = ColumnConst::create(std::move(column_with_default), 0);
|
||||
const auto * node = &dag.addColumn({ColumnPtr(std::move(column)), col.type, col.name});
|
||||
node = &dag.materializeNode(*node);
|
||||
outputs.push_back(node);
|
||||
}
|
||||
else
|
||||
{
|
||||
const auto * column_node = dag.getOutputs()[header.getPositionByName(col.name)];
|
||||
if (used_it != used_keys.end() && group_by_use_nulls && column_node->result_type->canBeInsideNullable())
|
||||
outputs.push_back(&dag.addFunction(to_nullable_function, { column_node }, col.name));
|
||||
else
|
||||
outputs.push_back(column_node);
|
||||
}
|
||||
}
|
||||
|
||||
dag.getOutputs().swap(outputs);
|
||||
auto dag = makeCreatingMissingKeysForGroupingSetDAG(header, output_header, grouping_sets_params, set_counter, group_by_use_nulls);
|
||||
auto expression = std::make_shared<ExpressionActions>(std::move(dag), settings.getActionsSettings());
|
||||
auto transform = std::make_shared<ExpressionTransform>(header, expression);
|
||||
|
||||
|
@ -7,18 +7,6 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
struct GroupingSetsParams
|
||||
{
|
||||
GroupingSetsParams() = default;
|
||||
|
||||
GroupingSetsParams(Names used_keys_, Names missing_keys_) : used_keys(std::move(used_keys_)), missing_keys(std::move(missing_keys_)) { }
|
||||
|
||||
Names used_keys;
|
||||
Names missing_keys;
|
||||
};
|
||||
|
||||
using GroupingSetsParamsList = std::vector<GroupingSetsParams>;
|
||||
|
||||
Block appendGroupingSetColumn(Block header);
|
||||
Block generateOutputHeader(const Block & input_header, const Names & keys, bool use_nulls);
|
||||
|
||||
@ -77,6 +65,13 @@ public:
|
||||
/// Argument input_stream would be the second input (from projection).
|
||||
std::unique_ptr<AggregatingProjectionStep> convertToAggregatingProjection(const DataStream & input_stream) const;
|
||||
|
||||
static ActionsDAG makeCreatingMissingKeysForGroupingSetDAG(
|
||||
const Block & in_header,
|
||||
const Block & out_header,
|
||||
const GroupingSetsParamsList & grouping_sets_params,
|
||||
UInt64 group,
|
||||
bool group_by_use_nulls);
|
||||
|
||||
private:
|
||||
void updateOutputStream() override;
|
||||
|
||||
|
@ -10,6 +10,11 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
static bool memoryBoundMergingWillBeUsed(
|
||||
const DataStream & input_stream,
|
||||
bool memory_bound_merging_of_aggregation_results_enabled,
|
||||
@ -37,6 +42,7 @@ static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_
|
||||
MergingAggregatedStep::MergingAggregatedStep(
|
||||
const DataStream & input_stream_,
|
||||
Aggregator::Params params_,
|
||||
GroupingSetsParamsList grouping_sets_params_,
|
||||
bool final_,
|
||||
bool memory_efficient_aggregation_,
|
||||
size_t max_threads_,
|
||||
@ -48,9 +54,10 @@ MergingAggregatedStep::MergingAggregatedStep(
|
||||
bool memory_bound_merging_of_aggregation_results_enabled_)
|
||||
: ITransformingStep(
|
||||
input_stream_,
|
||||
params_.getHeader(input_stream_.header, final_),
|
||||
MergingAggregatedTransform::appendGroupingIfNeeded(input_stream_.header, params_.getHeader(input_stream_.header, final_)),
|
||||
getTraits(should_produce_results_in_order_of_bucket_number_))
|
||||
, params(std::move(params_))
|
||||
, grouping_sets_params(std::move(grouping_sets_params_))
|
||||
, final(final_)
|
||||
, memory_efficient_aggregation(memory_efficient_aggregation_)
|
||||
, max_threads(max_threads_)
|
||||
@ -89,10 +96,13 @@ void MergingAggregatedStep::applyOrder(SortDescription sort_description, DataStr
|
||||
|
||||
void MergingAggregatedStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
|
||||
{
|
||||
auto transform_params = std::make_shared<AggregatingTransformParams>(pipeline.getHeader(), std::move(params), final);
|
||||
|
||||
if (memoryBoundMergingWillBeUsed())
|
||||
{
|
||||
if (input_streams.front().header.has("__grouping_set") || !grouping_sets_params.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Memory bound merging of aggregated results is not supported for grouping sets.");
|
||||
|
||||
auto transform_params = std::make_shared<AggregatingTransformParams>(pipeline.getHeader(), std::move(params), final);
|
||||
auto transform = std::make_shared<FinishAggregatingInOrderTransform>(
|
||||
pipeline.getHeader(),
|
||||
pipeline.getNumStreams(),
|
||||
@ -127,15 +137,19 @@ void MergingAggregatedStep::transformPipeline(QueryPipelineBuilder & pipeline, c
|
||||
pipeline.resize(1);
|
||||
|
||||
/// Now merge the aggregated blocks
|
||||
pipeline.addSimpleTransform([&](const Block & header)
|
||||
{ return std::make_shared<MergingAggregatedTransform>(header, transform_params, max_threads); });
|
||||
auto transform = std::make_shared<MergingAggregatedTransform>(pipeline.getHeader(), params, final, grouping_sets_params, max_threads);
|
||||
pipeline.addTransform(std::move(transform));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (input_streams.front().header.has("__grouping_set") || !grouping_sets_params.empty())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Memory efficient merging of aggregated results is not supported for grouping sets.");
|
||||
auto num_merge_threads = memory_efficient_merge_threads
|
||||
? memory_efficient_merge_threads
|
||||
: max_threads;
|
||||
|
||||
auto transform_params = std::make_shared<AggregatingTransformParams>(pipeline.getHeader(), std::move(params), final);
|
||||
pipeline.addMergingAggregatedMemoryEfficientTransform(transform_params, num_merge_threads);
|
||||
}
|
||||
|
||||
@ -154,7 +168,9 @@ void MergingAggregatedStep::describeActions(JSONBuilder::JSONMap & map) const
|
||||
|
||||
void MergingAggregatedStep::updateOutputStream()
|
||||
{
|
||||
output_stream = createOutputStream(input_streams.front(), params.getHeader(input_streams.front().header, final), getDataStreamTraits());
|
||||
const auto & in_header = input_streams.front().header;
|
||||
output_stream = createOutputStream(input_streams.front(),
|
||||
MergingAggregatedTransform::appendGroupingIfNeeded(in_header, params.getHeader(in_header, final)), getDataStreamTraits());
|
||||
if (is_order_overwritten) /// overwrite order again
|
||||
applyOrder(group_by_sort_description, overwritten_sort_scope);
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ public:
|
||||
MergingAggregatedStep(
|
||||
const DataStream & input_stream_,
|
||||
Aggregator::Params params_,
|
||||
GroupingSetsParamsList grouping_sets_params_,
|
||||
bool final_,
|
||||
bool memory_efficient_aggregation_,
|
||||
size_t max_threads_,
|
||||
@ -43,6 +44,7 @@ private:
|
||||
|
||||
|
||||
Aggregator::Params params;
|
||||
GroupingSetsParamsList grouping_sets_params;
|
||||
bool final;
|
||||
bool memory_efficient_aggregation;
|
||||
size_t max_threads;
|
||||
|
@ -255,7 +255,7 @@ void buildSortingDAG(QueryPlan::Node & node, std::optional<ActionsDAG> & dag, Fi
|
||||
|
||||
/// Add more functions to fixed columns.
|
||||
/// Functions result is fixed if all arguments are fixed or constants.
|
||||
void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
|
||||
void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
|
||||
{
|
||||
struct Frame
|
||||
{
|
||||
@ -300,20 +300,20 @@ void enreachFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
|
||||
{
|
||||
if (frame.node->function_base->isDeterministicInScopeOfQuery())
|
||||
{
|
||||
//std::cerr << "*** enreachFixedColumns check " << frame.node->result_name << std::endl;
|
||||
//std::cerr << "*** enrichFixedColumns check " << frame.node->result_name << std::endl;
|
||||
bool all_args_fixed_or_const = true;
|
||||
for (const auto * child : frame.node->children)
|
||||
{
|
||||
if (!child->column && !fixed_columns.contains(child))
|
||||
{
|
||||
//std::cerr << "*** enreachFixedColumns fail " << child->result_name << ' ' << static_cast<const void *>(child) << std::endl;
|
||||
//std::cerr << "*** enrichFixedColumns fail " << child->result_name << ' ' << static_cast<const void *>(child) << std::endl;
|
||||
all_args_fixed_or_const = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (all_args_fixed_or_const)
|
||||
{
|
||||
//std::cerr << "*** enreachFixedColumns add " << frame.node->result_name << ' ' << static_cast<const void *>(frame.node) << std::endl;
|
||||
//std::cerr << "*** enrichFixedColumns add " << frame.node->result_name << ' ' << static_cast<const void *>(frame.node) << std::endl;
|
||||
fixed_columns.insert(frame.node);
|
||||
}
|
||||
}
|
||||
@ -357,7 +357,7 @@ InputOrderInfoPtr buildInputOrderInfo(
|
||||
}
|
||||
}
|
||||
|
||||
enreachFixedColumns(sorting_key_dag, fixed_key_columns);
|
||||
enrichFixedColumns(sorting_key_dag, fixed_key_columns);
|
||||
}
|
||||
|
||||
/// This is a result direction we will read from MergeTree
|
||||
@ -530,7 +530,7 @@ AggregationInputOrder buildInputOrderInfo(
|
||||
}
|
||||
}
|
||||
|
||||
enreachFixedColumns(sorting_key_dag, fixed_key_columns);
|
||||
enrichFixedColumns(sorting_key_dag, fixed_key_columns);
|
||||
|
||||
for (const auto * output : dag->getOutputs())
|
||||
{
|
||||
@ -804,7 +804,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n
|
||||
buildSortingDAG(node, dag, fixed_columns, limit);
|
||||
|
||||
if (dag && !fixed_columns.empty())
|
||||
enreachFixedColumns(*dag, fixed_columns);
|
||||
enrichFixedColumns(*dag, fixed_columns);
|
||||
|
||||
if (auto * reading = typeid_cast<ReadFromMergeTree *>(reading_node->step.get()))
|
||||
{
|
||||
@ -858,7 +858,7 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl
|
||||
buildSortingDAG(node, dag, fixed_columns, limit);
|
||||
|
||||
if (dag && !fixed_columns.empty())
|
||||
enreachFixedColumns(*dag, fixed_columns);
|
||||
enrichFixedColumns(*dag, fixed_columns);
|
||||
|
||||
if (auto * reading = typeid_cast<ReadFromMergeTree *>(reading_node->step.get()))
|
||||
{
|
||||
|
@ -1,7 +1,10 @@
|
||||
#include <Processors/Transforms/MergingAggregatedTransform.h>
|
||||
#include <Processors/Transforms/AggregatingTransform.h>
|
||||
#include <Processors/Transforms/AggregatingInOrderTransform.h>
|
||||
#include <Processors/QueryPlan/AggregatingStep.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Interpreters/ExpressionActions.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -10,11 +13,192 @@ namespace ErrorCodes
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
MergingAggregatedTransform::MergingAggregatedTransform(
|
||||
Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_)
|
||||
: IAccumulatingTransform(std::move(header_), params_->getHeader())
|
||||
, params(std::move(params_)), max_threads(max_threads_)
|
||||
Block MergingAggregatedTransform::appendGroupingIfNeeded(const Block & in_header, Block out_header)
|
||||
{
|
||||
/// __grouping_set is neither GROUP BY key nor an aggregate function.
|
||||
/// It behaves like a GROUP BY key, but we cannot append it to keys
|
||||
/// because it changes hashing method and buckets for two level aggregation.
|
||||
/// Now, this column is processed "manually" by merging each group separately.
|
||||
if (in_header.has("__grouping_set"))
|
||||
out_header.insert(0, in_header.getByName("__grouping_set"));
|
||||
|
||||
return out_header;
|
||||
}
|
||||
|
||||
/// We should keep the order for GROUPING SET keys.
|
||||
/// The initiator creates a separate Aggregator for every group, so we do the same here.
|
||||
/// Otherwise, two-level aggregation will split the data into different buckets,
|
||||
/// and the result may have duplicating rows.
|
||||
static ActionsDAG makeReorderingActions(const Block & in_header, const GroupingSetsParams & params)
|
||||
{
|
||||
ActionsDAG reordering(in_header.getColumnsWithTypeAndName());
|
||||
auto & outputs = reordering.getOutputs();
|
||||
ActionsDAG::NodeRawConstPtrs new_outputs;
|
||||
new_outputs.reserve(in_header.columns() + params.used_keys.size() - params.used_keys.size());
|
||||
|
||||
std::unordered_map<std::string_view, size_t> index;
|
||||
for (size_t pos = 0; pos < outputs.size(); ++pos)
|
||||
index.emplace(outputs[pos]->result_name, pos);
|
||||
|
||||
for (const auto & used_name : params.used_keys)
|
||||
{
|
||||
auto & idx = index[used_name];
|
||||
new_outputs.push_back(outputs[idx]);
|
||||
}
|
||||
|
||||
for (const auto & used_name : params.used_keys)
|
||||
index[used_name] = outputs.size();
|
||||
for (const auto & missing_name : params.missing_keys)
|
||||
index[missing_name] = outputs.size();
|
||||
|
||||
for (const auto * output : outputs)
|
||||
{
|
||||
if (index[output->result_name] != outputs.size())
|
||||
new_outputs.push_back(output);
|
||||
}
|
||||
|
||||
outputs.swap(new_outputs);
|
||||
return reordering;
|
||||
}
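The reordering above boils down to "grouping-set keys first, in the order the set declares them, then everything else in its original relative order" (the real code additionally drops the set's missing keys, which is omitted here). A standalone sketch on plain column names, not part of the commit:

#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

/// Keys of the grouping set first, in declared order, then the remaining columns.
static std::vector<std::string> reorderForGroupingSet(
    const std::vector<std::string> & columns, const std::vector<std::string> & used_keys)
{
    std::vector<std::string> result;
    result.reserve(columns.size());
    std::unordered_set<std::string> keys(used_keys.begin(), used_keys.end());

    for (const auto & key : used_keys)
        result.push_back(key);
    for (const auto & column : columns)
        if (keys.count(column) == 0)
            result.push_back(column);
    return result;
}

int main()
{
    for (const auto & name : reorderForGroupingSet({"a", "b", "cnt", "c"}, {"c", "a"}))
        std::printf("%s ", name.c_str());
    std::printf("\n"); /// c a b cnt
    return 0;
}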
|
||||
|
||||
MergingAggregatedTransform::~MergingAggregatedTransform() = default;
|
||||
|
||||
MergingAggregatedTransform::MergingAggregatedTransform(
|
||||
Block header_,
|
||||
Aggregator::Params params,
|
||||
bool final,
|
||||
GroupingSetsParamsList grouping_sets_params,
|
||||
size_t max_threads_)
|
||||
: IAccumulatingTransform(header_, appendGroupingIfNeeded(header_, params.getHeader(header_, final)))
|
||||
, max_threads(max_threads_)
|
||||
{
|
||||
if (!grouping_sets_params.empty())
|
||||
{
|
||||
if (!header_.has("__grouping_set"))
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Cannot find __grouping_set column in header of MergingAggregatedTransform with grouping sets."
|
||||
"Header {}", header_.dumpStructure());
|
||||
|
||||
auto in_header = header_;
|
||||
in_header.erase(header_.getPositionByName("__grouping_set"));
|
||||
auto out_header = params.getHeader(header_, final);
|
||||
|
||||
grouping_sets.reserve(grouping_sets_params.size());
|
||||
for (const auto & grouping_set_params : grouping_sets_params)
|
||||
{
|
||||
size_t group = grouping_sets.size();
|
||||
|
||||
auto reordering = makeReorderingActions(in_header, grouping_set_params);
|
||||
|
||||
Aggregator::Params set_params(grouping_set_params.used_keys,
|
||||
params.aggregates,
|
||||
params.overflow_row,
|
||||
params.max_threads,
|
||||
params.max_block_size,
|
||||
params.min_hit_rate_to_use_consecutive_keys_optimization);
|
||||
|
||||
auto transform_params = std::make_shared<AggregatingTransformParams>(reordering.updateHeader(in_header), std::move(set_params), final);
|
||||
|
||||
auto creating = AggregatingStep::makeCreatingMissingKeysForGroupingSetDAG(
|
||||
transform_params->getHeader(),
|
||||
out_header,
|
||||
grouping_sets_params, group, false);
|
||||
|
||||
auto & groupiung_set = grouping_sets.emplace_back();
|
||||
groupiung_set.reordering_key_columns_actions = std::make_shared<ExpressionActions>(std::move(reordering));
|
||||
groupiung_set.creating_missing_keys_actions = std::make_shared<ExpressionActions>(std::move(creating));
|
||||
groupiung_set.params = std::move(transform_params);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
auto & groupiung_set = grouping_sets.emplace_back();
|
||||
groupiung_set.params = std::make_shared<AggregatingTransformParams>(header_, std::move(params), final);
|
||||
}
|
||||
}
|
||||
|
||||
void MergingAggregatedTransform::addBlock(Block block)
|
||||
{
|
||||
if (grouping_sets.size() == 1)
|
||||
{
|
||||
auto bucket = block.info.bucket_num;
|
||||
if (grouping_sets[0].reordering_key_columns_actions)
|
||||
grouping_sets[0].reordering_key_columns_actions->execute(block);
|
||||
grouping_sets[0].bucket_to_blocks[bucket].emplace_back(std::move(block));
|
||||
return;
|
||||
}
|
||||
|
||||
auto grouping_position = block.getPositionByName("__grouping_set");
|
||||
auto grouping_column = block.getByPosition(grouping_position).column;
|
||||
block.erase(grouping_position);
|
||||
|
||||
/// Split a block by __grouping_set values.
|
||||
|
||||
const auto * grouping_column_typed = typeid_cast<const ColumnUInt64 *>(grouping_column.get());
|
||||
if (!grouping_column_typed)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected UInt64 column for __grouping_set, got {}", grouping_column->getName());
|
||||
|
||||
IColumn::Selector selector;
|
||||
|
||||
const auto & grouping_data = grouping_column_typed->getData();
|
||||
size_t num_rows = grouping_data.size();
|
||||
UInt64 last_group = grouping_data[0];
|
||||
UInt64 max_group = last_group;
|
||||
for (size_t row = 1; row < num_rows; ++row)
|
||||
{
|
||||
auto group = grouping_data[row];
|
||||
|
||||
/// Optimization for equal ranges.
|
||||
if (last_group == group)
|
||||
continue;
|
||||
|
||||
/// Optimization for single group.
|
||||
if (selector.empty())
|
||||
selector.reserve(num_rows);
|
||||
|
||||
/// Fill the last equal range.
|
||||
selector.resize_fill(row, last_group);
|
||||
last_group = group;
|
||||
max_group = std::max(last_group, max_group);
|
||||
}
|
||||
|
||||
if (max_group >= grouping_sets.size())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Invalid group number {}. Number of groups {}.", last_group, grouping_sets.size());
|
||||
|
||||
/// Optimization for single group.
|
||||
if (selector.empty())
|
||||
{
|
||||
auto bucket = block.info.bucket_num;
|
||||
grouping_sets[last_group].reordering_key_columns_actions->execute(block);
|
||||
grouping_sets[last_group].bucket_to_blocks[bucket].emplace_back(std::move(block));
|
||||
return;
|
||||
}
|
||||
|
||||
/// Fill the last equal range.
|
||||
selector.resize_fill(num_rows, last_group);
|
||||
|
||||
const size_t num_groups = max_group + 1;
|
||||
Blocks splitted_blocks(num_groups);
|
||||
|
||||
for (size_t group_id = 0; group_id < num_groups; ++group_id)
|
||||
splitted_blocks[group_id] = block.cloneEmpty();
|
||||
|
||||
size_t columns_in_block = block.columns();
|
||||
for (size_t col_idx_in_block = 0; col_idx_in_block < columns_in_block; ++col_idx_in_block)
|
||||
{
|
||||
MutableColumns splitted_columns = block.getByPosition(col_idx_in_block).column->scatter(num_groups, selector);
|
||||
for (size_t group_id = 0; group_id < num_groups; ++group_id)
|
||||
splitted_blocks[group_id].getByPosition(col_idx_in_block).column = std::move(splitted_columns[group_id]);
|
||||
}
|
||||
|
||||
for (size_t group = 0; group < num_groups; ++group)
|
||||
{
|
||||
auto & splitted_block = splitted_blocks[group];
|
||||
splitted_block.info = block.info;
|
||||
grouping_sets[group].reordering_key_columns_actions->execute(splitted_block);
|
||||
grouping_sets[group].bucket_to_blocks[block.info.bucket_num].emplace_back(std::move(splitted_block));
|
||||
}
|
||||
}
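addBlock above routes rows to per-group accumulators with a selector that is only materialized once a second group shows up, then scatters every column by it. The same idea on std::vector, with IColumn::Selector and scatter() replaced by their standard-library equivalents; a sketch, not part of the commit:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    /// One "column" of values and, per row, the grouping set it belongs to.
    std::vector<int> values = {10, 11, 20, 21, 22, 12, 30};
    std::vector<uint64_t> grouping = {0, 0, 1, 1, 1, 0, 2};

    /// Build the selector lazily: while every row so far belongs to the same
    /// group, the selector stays empty (single-group fast path).
    std::vector<uint64_t> selector;
    uint64_t last_group = grouping[0];
    uint64_t max_group = last_group;
    for (size_t row = 1; row < grouping.size(); ++row)
    {
        uint64_t group = grouping[row];
        if (group == last_group)
            continue;                                 /// equal-range optimization
        if (selector.empty())
            selector.reserve(grouping.size());
        selector.resize(row, last_group);             /// fill the finished run
        last_group = group;
        max_group = std::max(max_group, last_group);
    }

    std::vector<std::vector<int>> split(max_group + 1);
    if (selector.empty())
    {
        split[last_group] = values;                   /// everything is one group
    }
    else
    {
        selector.resize(grouping.size(), last_group); /// fill the last run
        for (size_t row = 0; row < values.size(); ++row)
            split[selector[row]].push_back(values[row]); /// "scatter"
    }

    for (size_t g = 0; g < split.size(); ++g)
    {
        std::printf("group %zu:", g);
        for (int v : split[g])
            std::printf(" %d", v);
        std::printf("\n");
    }
    return 0;
}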
|
||||
|
||||
void MergingAggregatedTransform::consume(Chunk chunk)
|
||||
@ -46,7 +230,7 @@ void MergingAggregatedTransform::consume(Chunk chunk)
|
||||
block.info.is_overflows = agg_info->is_overflows;
|
||||
block.info.bucket_num = agg_info->bucket_num;
|
||||
|
||||
bucket_to_blocks[agg_info->bucket_num].emplace_back(std::move(block));
|
||||
addBlock(std::move(block));
|
||||
}
|
||||
else if (chunk.getChunkInfos().get<ChunkInfoWithAllocatedBytes>())
|
||||
{
|
||||
@ -54,7 +238,7 @@ void MergingAggregatedTransform::consume(Chunk chunk)
|
||||
block.info.is_overflows = false;
|
||||
block.info.bucket_num = -1;
|
||||
|
||||
bucket_to_blocks[block.info.bucket_num].emplace_back(std::move(block));
|
||||
addBlock(std::move(block));
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Chunk should have AggregatedChunkInfo in MergingAggregatedTransform.");
|
||||
@ -70,9 +254,23 @@ Chunk MergingAggregatedTransform::generate()
|
||||
/// Exception safety. Make iterator valid in case any method below throws.
|
||||
next_block = blocks.begin();
|
||||
|
||||
/// TODO: this operation can be made async. Add async for IAccumulatingTransform.
|
||||
params->aggregator.mergeBlocks(std::move(bucket_to_blocks), data_variants, max_threads, is_cancelled);
|
||||
blocks = params->aggregator.convertToBlocks(data_variants, params->final, max_threads);
|
||||
for (auto & grouping_set : grouping_sets)
|
||||
{
|
||||
auto & params = grouping_set.params;
|
||||
auto & bucket_to_blocks = grouping_set.bucket_to_blocks;
|
||||
AggregatedDataVariants data_variants;
|
||||
|
||||
/// TODO: this operation can be made async. Add async for IAccumulatingTransform.
|
||||
params->aggregator.mergeBlocks(std::move(bucket_to_blocks), data_variants, max_threads, is_cancelled);
|
||||
auto merged_blocks = params->aggregator.convertToBlocks(data_variants, params->final, max_threads);
|
||||
|
||||
if (grouping_set.creating_missing_keys_actions)
|
||||
for (auto & block : merged_blocks)
|
||||
grouping_set.creating_missing_keys_actions->execute(block);
|
||||
|
||||
blocks.splice(blocks.end(), std::move(merged_blocks));
|
||||
}
|
||||
|
||||
next_block = blocks.begin();
|
||||
}
|
||||
|
||||
|
@ -6,26 +6,46 @@
|
||||
namespace DB
|
||||
{
|
||||
|
||||
class ExpressionActions;
|
||||
using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
|
||||
|
||||
/** A pre-aggregate stream of blocks in which each block is already aggregated.
|
||||
* Aggregate functions in blocks should not be finalized so that their states can be merged.
|
||||
*/
|
||||
class MergingAggregatedTransform : public IAccumulatingTransform
|
||||
{
|
||||
public:
|
||||
MergingAggregatedTransform(Block header_, AggregatingTransformParamsPtr params_, size_t max_threads_);
|
||||
MergingAggregatedTransform(
|
||||
Block header_,
|
||||
Aggregator::Params params_,
|
||||
bool final_,
|
||||
GroupingSetsParamsList grouping_sets_params,
|
||||
size_t max_threads_);
|
||||
|
||||
~MergingAggregatedTransform() override;
|
||||
|
||||
String getName() const override { return "MergingAggregatedTransform"; }
|
||||
|
||||
static Block appendGroupingIfNeeded(const Block & in_header, Block out_header);
|
||||
|
||||
protected:
|
||||
void consume(Chunk chunk) override;
|
||||
Chunk generate() override;
|
||||
|
||||
private:
|
||||
AggregatingTransformParamsPtr params;
|
||||
LoggerPtr log = getLogger("MergingAggregatedTransform");
|
||||
size_t max_threads;
|
||||
|
||||
AggregatedDataVariants data_variants;
|
||||
Aggregator::BucketToBlocks bucket_to_blocks;
|
||||
struct GroupingSet
|
||||
{
|
||||
Aggregator::BucketToBlocks bucket_to_blocks;
|
||||
ExpressionActionsPtr reordering_key_columns_actions;
|
||||
ExpressionActionsPtr creating_missing_keys_actions;
|
||||
AggregatingTransformParamsPtr params;
|
||||
};
|
||||
|
||||
using GroupingSets = std::vector<GroupingSet>;
|
||||
GroupingSets grouping_sets;
|
||||
|
||||
UInt64 total_input_rows = 0;
|
||||
UInt64 total_input_blocks = 0;
|
||||
@ -35,6 +55,8 @@ private:
|
||||
|
||||
bool consume_started = false;
|
||||
bool generate_started = false;
|
||||
|
||||
void addBlock(Block block);
|
||||
};
|
||||
|
||||
}
|
||||
|
@ -807,7 +807,7 @@ MergeTreeDataPartBuilder IMergeTreeDataPart::getProjectionPartBuilder(const Stri
|
||||
const char * projection_extension = is_temp_projection ? ".tmp_proj" : ".proj";
|
||||
auto projection_storage = getDataPartStorage().getProjection(projection_name + projection_extension, !is_temp_projection);
|
||||
MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage);
|
||||
return builder.withPartInfo({"all", 0, 0, 0}).withParentPart(this);
|
||||
return builder.withPartInfo(MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION).withParentPart(this);
|
||||
}
|
||||
|
||||
void IMergeTreeDataPart::addProjectionPart(
|
||||
@ -1334,17 +1334,6 @@ void IMergeTreeDataPart::loadRowsCount()
|
||||
auto buf = metadata_manager->read("count.txt");
|
||||
readIntText(rows_count, *buf);
|
||||
assertEOF(*buf);
|
||||
|
||||
if (!index_granularity.empty() && rows_count < index_granularity.getTotalRows() && index_granularity_info.fixed_index_granularity)
|
||||
{
|
||||
/// Adjust last granule size to match the number of rows in the part in case of fixed index_granularity.
|
||||
index_granularity.popMark();
|
||||
index_granularity.appendMark(rows_count % index_granularity_info.fixed_index_granularity);
|
||||
if (rows_count != index_granularity.getTotalRows())
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Index granularity total rows in part {} does not match rows_count: {}, instead of {}",
|
||||
name, index_granularity.getTotalRows(), rows_count);
|
||||
}
|
||||
};
|
||||
|
||||
if (index_granularity.empty())
|
||||
|
@ -6,10 +6,18 @@
#include <Common/CurrentThread.h>
#include <Common/MemoryTracker.h>

#include <Common/logger_useful.h>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

const MergeTreePartInfo MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION = {"all", 0, 0, 0};

MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMutatedPartPtr future_part, const ContextPtr & context)
: table_id{table_id_}
, partition_id{future_part->part_info.partition_id}
@ -21,8 +29,23 @@ MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMuta
, merge_type{future_part->merge_type}
, merge_algorithm{MergeAlgorithm::Undecided}
{
auto format_version = MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING;
if (result_part_name != result_part_info.getPartNameV1())
format_version = MERGE_TREE_DATA_OLD_FORMAT_VERSION;

/// FIXME why do we need a merge list element for projection parts at all?
bool is_fake_projection_part = future_part->part_info == FAKE_RESULT_PART_FOR_PROJECTION;

size_t normal_parts_count = 0;
for (const auto & source_part : future_part->parts)
{
if (!is_fake_projection_part && !source_part->getParentPart())
{
++normal_parts_count;
if (!result_part_info.contains(MergeTreePartInfo::fromPartName(source_part->name, format_version)))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Source part {} is not covered by result part {}", source_part->name, result_part_info.getPartNameV1());
}

source_part_names.emplace_back(source_part->name);
source_part_paths.emplace_back(source_part->getDataPartStorage().getFullPath());

@ -35,13 +58,17 @@ MergeListElement::MergeListElement(const StorageID & table_id_, FutureMergedMuta
if (!future_part->parts.empty())
{
source_data_version = future_part->parts[0]->info.getDataVersion();
is_mutation = (result_part_info.getDataVersion() != source_data_version);
is_mutation = (result_part_info.level == future_part->parts[0]->info.level) && !is_fake_projection_part;

WriteBufferFromString out(partition);
const auto & part = future_part->parts[0];
part->partition.serializeText(part->storage, out, {});
}

if (!is_fake_projection_part && is_mutation && normal_parts_count != 1)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got {} source parts for mutation {}: {}", future_part->parts.size(),
result_part_info.getPartNameV1(), fmt::join(source_part_names, ", "));

thread_group = ThreadGroup::createForBackgroundProcess(context);
}

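As a side note on the "Source part {} is not covered by result part {}" check above: a result part covers a source part when both belong to the same partition and the source block range lies inside the result block range. The struct below is a simplified illustration of that containment rule only, not the real MergeTreePartInfo.

#include <string>

struct SimplePartInfo
{
    std::string partition_id;
    long min_block = 0;
    long max_block = 0;

    /// A result part "contains" a source part if partitions match and the block range is enclosed.
    bool contains(const SimplePartInfo & rhs) const
    {
        return partition_id == rhs.partition_id
            && min_block <= rhs.min_block
            && max_block >= rhs.max_block;
    }
};

/// Usage sketch: merging all_1_3_1 and all_4_6_1 into all_1_6_2 passes the check,
/// while a source part from another partition would hit the LOGICAL_ERROR above.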
@ -66,6 +66,8 @@ struct Settings;

struct MergeListElement : boost::noncopyable
{
static const MergeTreePartInfo FAKE_RESULT_PART_FOR_PROJECTION;

const StorageID table_id;
std::string partition_id;
std::string partition;
src/Storages/MergeTree/MergeProjectionPartsTask.cpp (new file, 95 lines)
@ -0,0 +1,95 @@
#include <Storages/MergeTree/MergeProjectionPartsTask.h>

#include <Common/TransactionID.h>
#include <Storages/MergeTree/MergeList.h>

namespace DB
{

bool MergeProjectionPartsTask::executeStep()
{
auto & current_level_parts = level_parts[current_level];
auto & next_level_parts = level_parts[next_level];

MergeTreeData::MutableDataPartsVector selected_parts;
while (selected_parts.size() < max_parts_to_merge_in_one_level && !current_level_parts.empty())
{
selected_parts.push_back(std::move(current_level_parts.back()));
current_level_parts.pop_back();
}

if (selected_parts.empty())
{
if (next_level_parts.empty())
{
LOG_WARNING(log, "There is no projection parts merged");

/// Task is finished
return false;
}
current_level = next_level;
++next_level;
}
else if (selected_parts.size() == 1)
{
if (next_level_parts.empty())
{
LOG_DEBUG(log, "Merged a projection part in level {}", current_level);
selected_parts[0]->renameTo(projection.name + ".proj", true);
selected_parts[0]->setName(projection.name);
selected_parts[0]->is_temp = false;
new_data_part->addProjectionPart(name, std::move(selected_parts[0]));

/// Task is finished
return false;
}
else
{
LOG_DEBUG(log, "Forwarded part {} in level {} to next level", selected_parts[0]->name, current_level);
next_level_parts.push_back(std::move(selected_parts[0]));
}
}
else if (selected_parts.size() > 1)
{
// Generate a unique part name
++block_num;
auto projection_future_part = std::make_shared<FutureMergedMutatedPart>();
MergeTreeData::DataPartsVector const_selected_parts(
std::make_move_iterator(selected_parts.begin()), std::make_move_iterator(selected_parts.end()));
projection_future_part->assign(std::move(const_selected_parts));
projection_future_part->name = fmt::format("{}_{}", projection.name, ++block_num);
projection_future_part->part_info = {"all", 0, 0, 0};

MergeTreeData::MergingParams projection_merging_params;
projection_merging_params.mode = MergeTreeData::MergingParams::Ordinary;
if (projection.type == ProjectionDescription::Type::Aggregate)
projection_merging_params.mode = MergeTreeData::MergingParams::Aggregating;

LOG_DEBUG(log, "Merged {} parts in level {} to {}", selected_parts.size(), current_level, projection_future_part->name);
auto tmp_part_merge_task = mutator->mergePartsToTemporaryPart(
projection_future_part,
projection.metadata,
merge_entry,
std::make_unique<MergeListElement>((*merge_entry)->table_id, projection_future_part, context),
*table_lock_holder,
time_of_merge,
context,
space_reservation,
false, // TODO Do we need deduplicate for projections
{},
false, // no cleanup
projection_merging_params,
NO_TRANSACTION_PTR,
/* need_prefix */ true,
new_data_part.get(),
".tmp_proj");

next_level_parts.push_back(executeHere(tmp_part_merge_task));
next_level_parts.back()->is_temp = true;
}

/// Need execute again
return true;
}

}
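For orientation, executeStep() above drains projection parts level by level: each call merges at most max_parts_to_merge_in_one_level parts from the current level into one part of the next level, and the loop ends when a single part remains. The following standalone sketch only simulates that strategy (part merging is replaced by collapsing counters); it assumes nothing beyond the constant 10 visible in the header.

#include <algorithm>
#include <cstdio>
#include <map>
#include <vector>

int main()
{
    std::map<size_t, std::vector<int>> level_parts;
    level_parts[0] = std::vector<int>(37, 1);   /// e.g. 37 projection parts to merge
    const size_t max_parts_per_merge = 10;      /// mirrors max_parts_to_merge_in_one_level

    size_t level = 0;
    size_t merges = 0;
    while (level_parts[level].size() > 1 || !level_parts[level + 1].empty())
    {
        auto & current = level_parts[level];
        auto & next = level_parts[level + 1];
        if (current.empty()) { ++level; continue; }

        size_t group = std::min(max_parts_per_merge, current.size());
        current.resize(current.size() - group);
        next.push_back(1);                      /// one merged part replaces the group
        if (group > 1)
            ++merges;                           /// a single leftover part is just forwarded
    }
    std::printf("merged down to one part in %zu merges\n", merges);
}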
src/Storages/MergeTree/MergeProjectionPartsTask.h (new file, 84 lines)
@ -0,0 +1,84 @@
#pragma once

#include <Interpreters/StorageID.h>
#include <Storages/MergeTree/IExecutableTask.h>
#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
#include <Storages/MergeTree/MergeProgress.h>
#include <Storages/MergeTree/FutureMergedMutatedPart.h>
#include <Storages/ProjectionsDescription.h>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}

class MergeProjectionPartsTask : public IExecutableTask
{
public:

MergeProjectionPartsTask(
String name_,
MergeTreeData::MutableDataPartsVector && parts_,
const ProjectionDescription & projection_,
size_t & block_num_,
ContextPtr context_,
TableLockHolder * table_lock_holder_,
MergeTreeDataMergerMutator * mutator_,
MergeListEntry * merge_entry_,
time_t time_of_merge_,
MergeTreeData::MutableDataPartPtr new_data_part_,
ReservationSharedPtr space_reservation_)
: name(std::move(name_))
, parts(std::move(parts_))
, projection(projection_)
, block_num(block_num_)
, context(context_)
, table_lock_holder(table_lock_holder_)
, mutator(mutator_)
, merge_entry(merge_entry_)
, time_of_merge(time_of_merge_)
, new_data_part(new_data_part_)
, space_reservation(space_reservation_)
, log(getLogger("MergeProjectionPartsTask"))
{
LOG_DEBUG(log, "Selected {} projection_parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
level_parts[current_level] = std::move(parts);
}

void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }

bool executeStep() override;

private:
String name;
MergeTreeData::MutableDataPartsVector parts;
const ProjectionDescription & projection;
size_t & block_num;

ContextPtr context;
TableLockHolder * table_lock_holder;
MergeTreeDataMergerMutator * mutator;
MergeListEntry * merge_entry;
time_t time_of_merge;

MergeTreeData::MutableDataPartPtr new_data_part;
ReservationSharedPtr space_reservation;

LoggerPtr log;

std::map<size_t, MergeTreeData::MutableDataPartsVector> level_parts;
size_t current_level = 0;
size_t next_level = 1;

/// TODO(nikitamikhaylov): make this constant a setting
static constexpr size_t max_parts_to_merge_in_one_level = 10;
};

}
@ -21,6 +21,8 @@
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/MergeTree/FutureMergedMutatedPart.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
#include <Storages/MergeTree/MergeTreeDataWriter.h>
#include <Storages/MergeTree/MergeProjectionPartsTask.h>
#include <Processors/Transforms/ExpressionTransform.h>
#include <Processors/Transforms/MaterializingTransform.h>
#include <Processors/Transforms/FilterTransform.h>
@ -63,6 +65,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
}


static ColumnsStatistics getStatisticsForColumns(
const NamesAndTypesList & columns_to_read,
const StorageMetadataPtr & metadata_snapshot)
@ -155,6 +158,13 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu
}
}

for (const auto * projection : global_ctx->projections_to_rebuild)
{
Names projection_columns_vec = projection->getRequiredColumns();
std::copy(projection_columns_vec.cbegin(), projection_columns_vec.cend(),
std::inserter(key_columns, key_columns.end()));
}

/// TODO: also force "summing" and "aggregating" columns to make Horizontal merge only for such columns

for (const auto & column : global_ctx->storage_columns)
@ -254,6 +264,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare()
extendObjectColumns(global_ctx->storage_columns, object_columns, false);
global_ctx->storage_snapshot = std::make_shared<StorageSnapshot>(*global_ctx->data, global_ctx->metadata_snapshot, std::move(object_columns));

prepareProjectionsToMergeAndRebuild();

extractMergingAndGatheringColumns();

global_ctx->new_data_part->uuid = global_ctx->future_part->uuid;
@ -517,6 +529,148 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute()
}

void MergeTask::ExecuteAndFinalizeHorizontalPart::prepareProjectionsToMergeAndRebuild() const
{
const auto mode = global_ctx->data->getSettings()->deduplicate_merge_projection_mode;
/// Under throw mode, we still choose to drop projections due to backward compatibility since some
/// users might have projections before this change.
if (global_ctx->data->merging_params.mode != MergeTreeData::MergingParams::Ordinary
&& (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::DROP))
return;

/// These merging modes may or may not reduce number of rows. It's not known until the horizontal stage is finished.
const bool merge_may_reduce_rows =
global_ctx->cleanup ||
global_ctx->deduplicate ||
ctx->merging_params.mode == MergeTreeData::MergingParams::Collapsing ||
ctx->merging_params.mode == MergeTreeData::MergingParams::Replacing ||
ctx->merging_params.mode == MergeTreeData::MergingParams::VersionedCollapsing;

const auto & projections = global_ctx->metadata_snapshot->getProjections();

for (const auto & projection : projections)
{
if (merge_may_reduce_rows)
{
global_ctx->projections_to_rebuild.push_back(&projection);
continue;
}

MergeTreeData::DataPartsVector projection_parts;
for (const auto & part : global_ctx->future_part->parts)
{
auto it = part->getProjectionParts().find(projection.name);
if (it != part->getProjectionParts().end() && !it->second->is_broken)
projection_parts.push_back(it->second);
}
if (projection_parts.size() == global_ctx->future_part->parts.size())
{
global_ctx->projections_to_merge.push_back(&projection);
global_ctx->projections_to_merge_parts[projection.name].assign(projection_parts.begin(), projection_parts.end());
}
else
{
chassert(projection_parts.size() < global_ctx->future_part->parts.size());
LOG_DEBUG(ctx->log, "Projection {} is not merged because some parts don't have it", projection.name);
continue;
}
}

const auto & settings = global_ctx->context->getSettingsRef();

for (const auto * projection : global_ctx->projections_to_rebuild)
ctx->projection_squashes.emplace_back(projection->sample_block.cloneEmpty(),
settings.min_insert_block_size_rows, settings.min_insert_block_size_bytes);
}
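In short, the function above classifies each projection into "rebuild" (when the merge may reduce rows), "merge" (when every source part carries a healthy projection part), or "skip". The tiny function below is only a hedged restatement of that decision as a pure function; the names are hypothetical, and the real code also handles deduplicate_merge_projection_mode earlier.

enum class ProjectionAction { Rebuild, Merge, Skip };

ProjectionAction classifyProjection(bool merge_may_reduce_rows, size_t healthy_projection_parts, size_t source_parts)
{
    if (merge_may_reduce_rows)
        return ProjectionAction::Rebuild;       /// recalculate the projection from the merged rows
    if (healthy_projection_parts == source_parts)
        return ProjectionAction::Merge;         /// merge the existing projection parts directly
    return ProjectionAction::Skip;              /// some source parts lack the projection
}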

void MergeTask::ExecuteAndFinalizeHorizontalPart::calculateProjections(const Block & block) const
{
for (size_t i = 0, size = global_ctx->projections_to_rebuild.size(); i < size; ++i)
{
const auto & projection = *global_ctx->projections_to_rebuild[i];
Block block_to_squash = projection.calculate(block, global_ctx->context);
auto & projection_squash_plan = ctx->projection_squashes[i];
projection_squash_plan.setHeader(block_to_squash.cloneEmpty());
Chunk squashed_chunk = Squashing::squash(projection_squash_plan.add({block_to_squash.getColumns(), block_to_squash.rows()}));
if (squashed_chunk)
{
auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns());
auto tmp_part = MergeTreeDataWriter::writeTempProjectionPart(
*global_ctx->data, ctx->log, result, projection, global_ctx->new_data_part.get(), ++ctx->projection_block_num);
tmp_part.finalize();
tmp_part.part->getDataPartStorage().commitTransaction();
ctx->projection_parts[projection.name].emplace_back(std::move(tmp_part.part));
}
}
}


void MergeTask::ExecuteAndFinalizeHorizontalPart::finalizeProjections() const
{
for (size_t i = 0, size = global_ctx->projections_to_rebuild.size(); i < size; ++i)
{
const auto & projection = *global_ctx->projections_to_rebuild[i];
auto & projection_squash_plan = ctx->projection_squashes[i];
auto squashed_chunk = Squashing::squash(projection_squash_plan.flush());
if (squashed_chunk)
{
auto result = projection_squash_plan.getHeader().cloneWithColumns(squashed_chunk.detachColumns());
auto temp_part = MergeTreeDataWriter::writeTempProjectionPart(
*global_ctx->data, ctx->log, result, projection, global_ctx->new_data_part.get(), ++ctx->projection_block_num);
temp_part.finalize();
temp_part.part->getDataPartStorage().commitTransaction();
ctx->projection_parts[projection.name].emplace_back(std::move(temp_part.part));
}
}

ctx->projection_parts_iterator = std::make_move_iterator(ctx->projection_parts.begin());
if (ctx->projection_parts_iterator != std::make_move_iterator(ctx->projection_parts.end()))
constructTaskForProjectionPartsMerge();
}
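The two functions above follow an add-then-flush pattern: each merged block is fed into a per-projection Squashing that only emits a chunk once enough rows or bytes have accumulated, and finalizeProjections() flushes whatever remains after the last block. The toy accumulator below is not the ClickHouse Squashing class; it is a minimal sketch of why the explicit flush() at the end is required.

#include <cstddef>
#include <optional>
#include <vector>

class RowSquasher
{
public:
    explicit RowSquasher(size_t min_rows_) : min_rows(min_rows_) {}

    /// Returns a squashed batch once the threshold is reached, otherwise keeps buffering.
    std::optional<std::vector<int>> add(std::vector<int> rows)
    {
        buffer.insert(buffer.end(), rows.begin(), rows.end());
        if (buffer.size() < min_rows)
            return std::nullopt;
        std::vector<int> out;
        out.swap(buffer);
        return out;
    }

    /// Whatever is left must be emitted explicitly, as finalizeProjections() does above.
    std::optional<std::vector<int>> flush()
    {
        if (buffer.empty())
            return std::nullopt;
        std::vector<int> out;
        out.swap(buffer);
        return out;
    }

private:
    size_t min_rows;
    std::vector<int> buffer;
};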

void MergeTask::ExecuteAndFinalizeHorizontalPart::constructTaskForProjectionPartsMerge() const
{
auto && [name, parts] = *ctx->projection_parts_iterator;
const auto & projection = global_ctx->metadata_snapshot->projections.get(name);

ctx->merge_projection_parts_task_ptr = std::make_unique<MergeProjectionPartsTask>
(
name,
std::move(parts),
projection,
ctx->projection_block_num,
global_ctx->context,
global_ctx->holder,
global_ctx->mutator,
global_ctx->merge_entry,
global_ctx->time_of_merge,
global_ctx->new_data_part,
global_ctx->space_reservation
);
}


bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeMergeProjections() // NOLINT
{
/// In case if there are no projections we didn't construct a task
if (!ctx->merge_projection_parts_task_ptr)
return false;

if (ctx->merge_projection_parts_task_ptr->executeStep())
return true;

++ctx->projection_parts_iterator;

if (ctx->projection_parts_iterator == std::make_move_iterator(ctx->projection_parts.end()))
return false;

constructTaskForProjectionPartsMerge();

return true;
}

bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
{
Stopwatch watch(CLOCK_MONOTONIC_COARSE);
@ -535,6 +689,8 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
global_ctx->rows_written += block.rows();
const_cast<MergedBlockOutputStream &>(*global_ctx->to).write(block);

calculateProjections(block);

UInt64 result_rows = 0;
UInt64 result_bytes = 0;
global_ctx->merged_pipeline.tryGetResultRowsAndBytes(result_rows, result_bytes);
@ -558,8 +714,10 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl()
return true;
}


void MergeTask::ExecuteAndFinalizeHorizontalPart::finalize() const
{
finalizeProjections();
global_ctx->merging_executor.reset();
global_ctx->merged_pipeline.reset();

@ -847,35 +1005,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c
ReadableSize(global_ctx->merge_list_element_ptr->bytes_read_uncompressed / elapsed_seconds));
}


const auto mode = global_ctx->data->getSettings()->deduplicate_merge_projection_mode;
/// Under throw mode, we still choose to drop projections due to backward compatibility since some
/// users might have projections before this change.
if (global_ctx->data->merging_params.mode != MergeTreeData::MergingParams::Ordinary
&& (mode == DeduplicateMergeProjectionMode::THROW || mode == DeduplicateMergeProjectionMode::DROP))
for (const auto & projection : global_ctx->projections_to_merge)
{
ctx->projections_iterator = ctx->tasks_for_projections.begin();
return false;
}

const auto & projections = global_ctx->metadata_snapshot->getProjections();

for (const auto & projection : projections)
{
MergeTreeData::DataPartsVector projection_parts;
for (const auto & part : global_ctx->future_part->parts)
{
auto actual_projection_parts = part->getProjectionParts();
auto it = actual_projection_parts.find(projection.name);
if (it != actual_projection_parts.end() && !it->second->is_broken)
projection_parts.push_back(it->second);
}
if (projection_parts.size() < global_ctx->future_part->parts.size())
{
LOG_DEBUG(ctx->log, "Projection {} is not merged because some parts don't have it", projection.name);
continue;
}

MergeTreeData::DataPartsVector projection_parts = global_ctx->projections_to_merge_parts[projection->name];
LOG_DEBUG(
ctx->log,
"Selected {} projection_parts from {} to {}",
@ -885,24 +1017,25 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c

auto projection_future_part = std::make_shared<FutureMergedMutatedPart>();
projection_future_part->assign(std::move(projection_parts));
projection_future_part->name = projection.name;
projection_future_part->name = projection->name;
// TODO (ab): path in future_part is only for merge process introspection, which is not available for merges of projection parts.
// Let's comment this out to avoid code inconsistency and add it back after we implement projection merge introspection.
// projection_future_part->path = global_ctx->future_part->path + "/" + projection.name + ".proj/";
projection_future_part->part_info = {"all", 0, 0, 0};
projection_future_part->part_info = MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION;

MergeTreeData::MergingParams projection_merging_params;
projection_merging_params.mode = MergeTreeData::MergingParams::Ordinary;
if (projection.type == ProjectionDescription::Type::Aggregate)
if (projection->type == ProjectionDescription::Type::Aggregate)
projection_merging_params.mode = MergeTreeData::MergingParams::Aggregating;

ctx->tasks_for_projections.emplace_back(std::make_shared<MergeTask>(
projection_future_part,
projection.metadata,
projection->metadata,
global_ctx->merge_entry,
std::make_unique<MergeListElement>((*global_ctx->merge_entry)->table_id, projection_future_part, global_ctx->context),
global_ctx->time_of_merge,
global_ctx->context,
*global_ctx->holder,
global_ctx->space_reservation,
global_ctx->deduplicate,
global_ctx->deduplicate_by_columns,

@ -9,6 +9,7 @@
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>

#include <Interpreters/Squashing.h>
#include <Interpreters/TemporaryDataOnDisk.h>

#include <Processors/Executors/PullingPipelineExecutor.h>
@ -72,6 +73,7 @@ public:
std::unique_ptr<MergeListElement> projection_merge_list_element_,
time_t time_of_merge_,
ContextPtr context_,
TableLockHolder & holder,
ReservationSharedPtr space_reservation_,
bool deduplicate_,
Names deduplicate_by_columns_,
@ -96,6 +98,7 @@ public:
= global_ctx->projection_merge_list_element ? global_ctx->projection_merge_list_element.get() : (*global_ctx->merge_entry)->ptr();
global_ctx->time_of_merge = std::move(time_of_merge_);
global_ctx->context = std::move(context_);
global_ctx->holder = &holder;
global_ctx->space_reservation = std::move(space_reservation_);
global_ctx->deduplicate = std::move(deduplicate_);
global_ctx->deduplicate_by_columns = std::move(deduplicate_by_columns_);
@ -151,6 +154,7 @@ private:
/// Proper initialization is responsibility of the author
struct GlobalRuntimeContext : public IStageRuntimeContext
{
TableLockHolder * holder;
MergeList::Entry * merge_entry{nullptr};
/// If not null, use this instead of the global MergeList::Entry. This is for merging projections.
std::unique_ptr<MergeListElement> projection_merge_list_element;
@ -181,6 +185,10 @@ private:

MergeAlgorithm chosen_merge_algorithm{MergeAlgorithm::Undecided};

std::vector<ProjectionDescriptionRawPtr> projections_to_rebuild{};
std::vector<ProjectionDescriptionRawPtr> projections_to_merge{};
std::map<String, MergeTreeData::DataPartsVector> projections_to_merge_parts{};

std::unique_ptr<MergeStageProgress> horizontal_stage_progress{nullptr};
std::unique_ptr<MergeStageProgress> column_progress{nullptr};

@ -228,6 +236,14 @@ private:
std::unique_ptr<WriteBuffer> rows_sources_write_buf{nullptr};
std::optional<ColumnSizeEstimator> column_sizes{};

/// For projections to rebuild
using ProjectionNameToItsBlocks = std::map<String, MergeTreeData::MutableDataPartsVector>;
ProjectionNameToItsBlocks projection_parts;
std::move_iterator<ProjectionNameToItsBlocks::iterator> projection_parts_iterator;
std::vector<Squashing> projection_squashes;
size_t projection_block_num = 0;
ExecutableTaskPtr merge_projection_parts_task_ptr;

size_t initial_reservation{0};
bool read_with_direct_io{false};

@ -257,16 +273,23 @@ private:
void finalize() const;

/// NOTE: Using pointer-to-member instead of std::function and lambda makes stacktraces much more concise and readable
using ExecuteAndFinalizeHorizontalPartSubtasks = std::array<bool(ExecuteAndFinalizeHorizontalPart::*)(), 2>;
using ExecuteAndFinalizeHorizontalPartSubtasks = std::array<bool(ExecuteAndFinalizeHorizontalPart::*)(), 3>;

const ExecuteAndFinalizeHorizontalPartSubtasks subtasks
{
&ExecuteAndFinalizeHorizontalPart::prepare,
&ExecuteAndFinalizeHorizontalPart::executeImpl
&ExecuteAndFinalizeHorizontalPart::executeImpl,
&ExecuteAndFinalizeHorizontalPart::executeMergeProjections
};

ExecuteAndFinalizeHorizontalPartSubtasks::const_iterator subtasks_iterator = subtasks.begin();

void prepareProjectionsToMergeAndRebuild() const;
void calculateProjections(const Block & block) const;
void finalizeProjections() const;
void constructTaskForProjectionPartsMerge() const;
bool executeMergeProjections();

MergeAlgorithm chooseMergeAlgorithm() const;
void createMergedStream();
void extractMergingAndGatheringColumns() const;
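The NOTE above motivates the subtask array: storing pointer-to-member functions keeps frames named after the real methods in stack traces, unlike type-erased std::function wrappers. The self-contained sketch below illustrates that dispatch style with a hypothetical Stage class; it is not the MergeTask code itself.

#include <array>

struct Stage
{
    bool prepare() { return false; }        /// finishes in one call
    bool execute() { return work-- > 0; }   /// needs several calls
    int work = 3;

    using Subtasks = std::array<bool (Stage::*)(), 2>;
    const Subtasks subtasks{&Stage::prepare, &Stage::execute};
    Subtasks::const_iterator it = subtasks.begin();

    /// One scheduler step: run the current subtask through the member pointer,
    /// advance to the next subtask once it reports completion.
    bool step()
    {
        if (it == subtasks.end())
            return false;
        if ((this->*(*it))())
            return true;
        ++it;
        return it != subtasks.end();
    }
};

/// Usage sketch: Stage s; while (s.step()) { /* reschedule */ }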
@ -671,7 +671,7 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart(
const StorageMetadataPtr & metadata_snapshot,
MergeList::Entry * merge_entry,
std::unique_ptr<MergeListElement> projection_merge_list_element,
TableLockHolder,
TableLockHolder & holder,
time_t time_of_merge,
ContextPtr context,
ReservationSharedPtr space_reservation,
@ -691,6 +691,7 @@ MergeTaskPtr MergeTreeDataMergerMutator::mergePartsToTemporaryPart(
std::move(projection_merge_list_element),
time_of_merge,
context,
holder,
space_reservation,
deduplicate,
deduplicate_by_columns,

@ -159,7 +159,7 @@ public:
const StorageMetadataPtr & metadata_snapshot,
MergeListEntry * merge_entry,
std::unique_ptr<MergeListElement> projection_merge_list_element,
TableLockHolder table_lock_holder,
TableLockHolder & table_lock_holder,
time_t time_of_merge,
ContextPtr context,
ReservationSharedPtr space_reservation,
@ -577,10 +577,7 @@ void MergeTreeDataPartWriterWide::validateColumnOfFixedSize(const NameAndTypePai

if (index_granularity_rows != index_granularity.getMarkRows(mark_num))
{
/// With fixed granularity we can have last mark with less rows than granularity
const bool is_last_mark = (mark_num + 1 == index_granularity.getMarksCount());
if (!index_granularity_info.fixed_index_granularity || !is_last_mark)
throw Exception(
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect mark rows for part {} for mark #{}"
" (compressed offset {}, decompressed offset {}), in-memory {}, on disk {}, total marks {}",
@ -844,14 +841,7 @@ void MergeTreeDataPartWriterWide::adjustLastMarkIfNeedAndFlushToDisk(size_t new_
/// Without offset
rows_written_in_last_mark = 0;
}

if (compute_granularity)
{
index_granularity.popMark();
index_granularity.appendMark(new_rows_in_last_mark);
}
}

}

}
@ -92,13 +92,14 @@ size_t MergeTreeDataSelectExecutor::getApproximateTotalRowsToRead(
/// We will find out how many rows we would have read without sampling.
LOG_DEBUG(log, "Preliminary index scan with condition: {}", key_condition.toString());

MarkRanges exact_ranges;
for (const auto & part : parts)
{
MarkRanges exact_ranges;
markRangesFromPKRange(part, metadata_snapshot, key_condition, {}, &exact_ranges, settings, log);
for (const auto & range : exact_ranges)
MarkRanges part_ranges = markRangesFromPKRange(part, metadata_snapshot, key_condition, {}, &exact_ranges, settings, log);
for (const auto & range : part_ranges)
rows_count += part->index_granularity.getRowsCountInRange(range);
}
UNUSED(exact_ranges);

return rows_count;
}
@ -128,14 +128,14 @@ void MergeTreeIndexAggregatorFullText::update(const Block & block, size_t * pos,
"Position: {}, Block rows: {}.", *pos, block.rows());

size_t rows_read = std::min(limit, block.rows() - *pos);
auto row_id = store->getNextRowIDRange(rows_read);
auto start_row_id = row_id;
auto start_row_id = store->getNextRowIDRange(rows_read);

for (size_t col = 0; col < index_columns.size(); ++col)
{
const auto & column_with_type = block.getByName(index_columns[col]);
const auto & column = column_with_type.column;
size_t current_position = *pos;
auto row_id = start_row_id;

bool need_to_write = false;
if (isArray(column_with_type.type))
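The hunk above reserves the row-id range once per block and lets every indexed column restart from that same base, so postings from different columns refer to the same rows. A minimal sketch of that pattern, with purely illustrative names:

#include <cstddef>
#include <vector>

void indexBlock(size_t base_row_id, const std::vector<std::vector<int>> & columns)
{
    for (const auto & column : columns)
    {
        size_t row_id = base_row_id;        /// reset per column, never advanced across columns
        for (int value : column)
        {
            (void)value;                    /// e.g. record a (token -> row_id) posting here
            ++row_id;
        }
    }
}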
@ -52,8 +52,10 @@ const std::unordered_map<String, unum::usearch::metric_kind_t> distanceFunctionT

/// Maps from user-facing name to internal name
const std::unordered_map<String, unum::usearch::scalar_kind_t> quantizationToScalarKind = {
{"f64", unum::usearch::scalar_kind_t::f64_k},
{"f32", unum::usearch::scalar_kind_t::f32_k},
{"f16", unum::usearch::scalar_kind_t::f16_k},
{"bf16", unum::usearch::scalar_kind_t::bf16_k},
{"i8", unum::usearch::scalar_kind_t::i8_k}};
/// Usearch provides more quantizations but ^^ above ones seem the only ones comprehensively supported across all distance functions.

@ -461,7 +463,7 @@ MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index)
{
/// Default parameters:
unum::usearch::metric_kind_t metric_kind = distanceFunctionToMetricKind.at(index.arguments[1].safeGet<String>());
unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::f32_k;
unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::bf16_k;
UsearchHnswParams usearch_hnsw_params;

/// Optional parameters:
@ -24,6 +24,7 @@
#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
#include <Storages/MergeTree/StorageFromMergeTreeDataPart.h>
#include <Storages/MergeTree/MergeTreeDataWriter.h>
#include <Storages/MergeTree/MergeProjectionPartsTask.h>
#include <Storages/MutationCommands.h>
#include <Storages/MergeTree/MergeTreeDataMergerMutator.h>
#include <Storages/MergeTree/MergeTreeIndexFullText.h>
@ -1058,136 +1059,6 @@ struct MutationContext
using MutationContextPtr = std::shared_ptr<MutationContext>;


class MergeProjectionPartsTask : public IExecutableTask
{
public:

MergeProjectionPartsTask(
String name_,
MergeTreeData::MutableDataPartsVector && parts_,
const ProjectionDescription & projection_,
size_t & block_num_,
MutationContextPtr ctx_)
: name(std::move(name_))
, parts(std::move(parts_))
, projection(projection_)
, block_num(block_num_)
, ctx(ctx_)
, log(getLogger("MergeProjectionPartsTask"))
{
LOG_DEBUG(log, "Selected {} projection_parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
level_parts[current_level] = std::move(parts);
}

void onCompleted() override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
StorageID getStorageID() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
Priority getPriority() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }
String getQueryId() const override { throw Exception(ErrorCodes::LOGICAL_ERROR, "Not implemented"); }

bool executeStep() override
{
auto & current_level_parts = level_parts[current_level];
auto & next_level_parts = level_parts[next_level];

MergeTreeData::MutableDataPartsVector selected_parts;
while (selected_parts.size() < max_parts_to_merge_in_one_level && !current_level_parts.empty())
{
selected_parts.push_back(std::move(current_level_parts.back()));
current_level_parts.pop_back();
}

if (selected_parts.empty())
{
if (next_level_parts.empty())
{
LOG_WARNING(log, "There is no projection parts merged");

/// Task is finished
return false;
}
current_level = next_level;
++next_level;
}
else if (selected_parts.size() == 1)
{
if (next_level_parts.empty())
{
LOG_DEBUG(log, "Merged a projection part in level {}", current_level);
selected_parts[0]->renameTo(projection.name + ".proj", true);
selected_parts[0]->setName(projection.name);
selected_parts[0]->is_temp = false;
ctx->new_data_part->addProjectionPart(name, std::move(selected_parts[0]));

/// Task is finished
return false;
}
else
{
LOG_DEBUG(log, "Forwarded part {} in level {} to next level", selected_parts[0]->name, current_level);
next_level_parts.push_back(std::move(selected_parts[0]));
}
}
else if (selected_parts.size() > 1)
{
// Generate a unique part name
++block_num;
auto projection_future_part = std::make_shared<FutureMergedMutatedPart>();
MergeTreeData::DataPartsVector const_selected_parts(
std::make_move_iterator(selected_parts.begin()), std::make_move_iterator(selected_parts.end()));
projection_future_part->assign(std::move(const_selected_parts));
projection_future_part->name = fmt::format("{}_{}", projection.name, ++block_num);
projection_future_part->part_info = {"all", 0, 0, 0};

MergeTreeData::MergingParams projection_merging_params;
projection_merging_params.mode = MergeTreeData::MergingParams::Ordinary;
if (projection.type == ProjectionDescription::Type::Aggregate)
projection_merging_params.mode = MergeTreeData::MergingParams::Aggregating;

LOG_DEBUG(log, "Merged {} parts in level {} to {}", selected_parts.size(), current_level, projection_future_part->name);
auto tmp_part_merge_task = ctx->mutator->mergePartsToTemporaryPart(
projection_future_part,
projection.metadata,
ctx->mutate_entry,
std::make_unique<MergeListElement>((*ctx->mutate_entry)->table_id, projection_future_part, ctx->context),
*ctx->holder,
ctx->time_of_mutation,
ctx->context,
ctx->space_reservation,
false, // TODO Do we need deduplicate for projections
{},
false, // no cleanup
projection_merging_params,
NO_TRANSACTION_PTR,
/* need_prefix */ true,
ctx->new_data_part.get(),
".tmp_proj");

next_level_parts.push_back(executeHere(tmp_part_merge_task));
next_level_parts.back()->is_temp = true;
}

/// Need execute again
return true;
}

private:
String name;
MergeTreeData::MutableDataPartsVector parts;
const ProjectionDescription & projection;
size_t & block_num;
MutationContextPtr ctx;

LoggerPtr log;

std::map<size_t, MergeTreeData::MutableDataPartsVector> level_parts;
size_t current_level = 0;
size_t next_level = 1;

/// TODO(nikitamikhaylov): make this constant a setting
static constexpr size_t max_parts_to_merge_in_one_level = 10;
};


// This class is responsible for:
// 1. get projection pipeline and a sink to write parts
// 2. build an executor that can write block to the input stream (actually we can write through it to generate as many parts as possible)
@ -1406,7 +1277,13 @@ void PartMergerWriter::constructTaskForProjectionPartsMerge()
std::move(parts),
projection,
block_num,
ctx
ctx->context,
ctx->holder,
ctx->mutator,
ctx->mutate_entry,
ctx->time_of_mutation,
ctx->new_data_part,
ctx->space_reservation
);
}

@ -148,10 +148,12 @@ void StorageAzureConfiguration::fromAST(ASTs & engine_args, ContextPtr context,
{
if (engine_args.size() < 3 || engine_args.size() > (with_structure ? 8 : 7))
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Storage AzureBlobStorage requires 3 to 7 arguments: "
"AzureBlobStorage(connection_string|storage_account_url, container_name, blobpath, "
"[account_name, account_key, format, compression, structure)])");
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Storage AzureBlobStorage requires 3 to {} arguments: "
"AzureBlobStorage(connection_string|storage_account_url, container_name, blobpath, "
"[account_name, account_key, format, compression, structure)])",
(with_structure ? 8 : 7));
}

for (auto & engine_arg : engine_args)
@ -3,7 +3,7 @@
#include "config.h"
#include <set>

#if USE_AWS_S3 && USE_PARQUET
#if USE_PARQUET

#include <Common/logger_useful.h>
#include <Columns/ColumnString.h>
@ -425,8 +425,9 @@ struct DeltaLakeMetadataImpl
{
auto field = fields->getObject(static_cast<Int32>(i));
element_names.push_back(field->getValue<String>("name"));
auto required = field->getValue<bool>("required");
element_types.push_back(getFieldType(field, "type", required));

auto is_nullable = field->getValue<bool>("nullable");
element_types.push_back(getFieldType(field, "type", is_nullable));
}

return std::make_shared<DataTypeTuple>(element_types, element_names);
@ -434,16 +435,16 @@ struct DeltaLakeMetadataImpl

if (type_name == "array")
{
bool is_nullable = type->getValue<bool>("containsNull");
auto element_type = getFieldType(type, "elementType", is_nullable);
bool element_nullable = type->getValue<bool>("containsNull");
auto element_type = getFieldType(type, "elementType", element_nullable);
return std::make_shared<DataTypeArray>(element_type);
}

if (type_name == "map")
{
bool is_nullable = type->getValue<bool>("containsNull");
auto key_type = getFieldType(type, "keyType", /* is_nullable */false);
auto value_type = getFieldType(type, "valueType", is_nullable);
bool value_nullable = type->getValue<bool>("valueContainsNull");
auto value_type = getFieldType(type, "valueType", value_nullable);
return std::make_shared<DataTypeMap>(key_type, value_type);
}

@ -2,7 +2,7 @@

#include "config.h"

#if USE_AWS_S3 && USE_AVRO
#if USE_AVRO

#include <Formats/FormatFactory.h>
#include <Storages/IStorage.h>

@ -1,6 +1,6 @@
#include "config.h"

#if USE_AWS_S3 && USE_AVRO
#if USE_AVRO

#include <Common/logger_useful.h>
#include <Core/Settings.h>

@ -1,6 +1,6 @@
#pragma once

#if USE_AWS_S3 && USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format.
#if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format.

#include <Interpreters/Context_fwd.h>
#include <Core/Types.h>

@ -2,10 +2,12 @@

#if USE_AWS_S3

#include <Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h>
#include <Storages/ObjectStorage/DataLakes/IStorageDataLake.h>
#include <Storages/ObjectStorage/DataLakes/IcebergMetadata.h>
#include <Storages/ObjectStorage/S3/Configuration.h>
# include <Storages/ObjectStorage/Azure/Configuration.h>
# include <Storages/ObjectStorage/DataLakes/IDataLakeMetadata.h>
# include <Storages/ObjectStorage/DataLakes/IStorageDataLake.h>
# include <Storages/ObjectStorage/DataLakes/IcebergMetadata.h>
# include <Storages/ObjectStorage/Local/Configuration.h>
# include <Storages/ObjectStorage/S3/Configuration.h>


namespace DB
@ -22,6 +24,54 @@ void registerStorageIceberg(StorageFactory & factory)
auto configuration = std::make_shared<StorageS3Configuration>();
StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false);

return StorageIceberg::create(
configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode);
},
{
.supports_settings = false,
.supports_schema_inference = true,
.source_access_type = AccessType::S3,
});

factory.registerStorage(
"IcebergS3",
[&](const StorageFactory::Arguments & args)
{
auto configuration = std::make_shared<StorageS3Configuration>();
StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false);

return StorageIceberg::create(
configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode);
},
{
.supports_settings = false,
.supports_schema_inference = true,
.source_access_type = AccessType::S3,
});

factory.registerStorage(
"IcebergAzure",
[&](const StorageFactory::Arguments & args)
{
auto configuration = std::make_shared<StorageAzureConfiguration>();
StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), true);

return StorageIceberg::create(
configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode);
},
{
.supports_settings = false,
.supports_schema_inference = true,
.source_access_type = AccessType::AZURE,
});

factory.registerStorage(
"IcebergLocal",
[&](const StorageFactory::Arguments & args)
{
auto configuration = std::make_shared<StorageLocalConfiguration>();
StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false);

return StorageIceberg::create(
configuration, args.getContext(), args.table_id, args.columns,
args.constraints, args.comment, std::nullopt, args.mode);
@ -29,7 +79,7 @@ void registerStorageIceberg(StorageFactory & factory)
{
.supports_settings = false,
.supports_schema_inference = true,
.source_access_type = AccessType::S3,
.source_access_type = AccessType::FILE,
});
}

src/Storages/ObjectStorage/Local/Configuration.cpp (new file, 77 lines)
@ -0,0 +1,77 @@
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Storages/ObjectStorage/Local/Configuration.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include "Common/NamedCollections/NamedCollections.h"


namespace DB
{

namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

void StorageLocalConfiguration::fromNamedCollection(const NamedCollection & collection, ContextPtr)
{
path = collection.get<String>("path");
format = collection.getOrDefault<String>("format", "auto");
compression_method = collection.getOrDefault<String>("compression_method", collection.getOrDefault<String>("compression", "auto"));
structure = collection.getOrDefault<String>("structure", "auto");
paths = {path};
}


void StorageLocalConfiguration::fromAST(ASTs & args, ContextPtr context, bool with_structure)
{
const size_t max_args_num = with_structure ? 4 : 3;
if (args.empty() || args.size() > max_args_num)
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Expected not more than {} arguments", max_args_num);
}

for (auto & arg : args)
arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context);

path = checkAndGetLiteralArgument<String>(args[0], "path");

if (args.size() > 1)
{
format = checkAndGetLiteralArgument<String>(args[1], "format_name");
}

if (with_structure)
{
if (args.size() > 2)
{
structure = checkAndGetLiteralArgument<String>(args[2], "structure");
}
if (args.size() > 3)
{
compression_method = checkAndGetLiteralArgument<String>(args[3], "compression_method");
}
}
else if (args.size() > 2)
{
compression_method = checkAndGetLiteralArgument<String>(args[2], "compression_method");
}
paths = {path};
}

StorageObjectStorage::QuerySettings StorageLocalConfiguration::getQuerySettings(const ContextPtr & context) const
{
const auto & settings = context->getSettingsRef();
return StorageObjectStorage::QuerySettings{
.truncate_on_insert = settings.engine_file_truncate_on_insert,
.create_new_file_on_insert = false,
.schema_inference_use_cache = settings.schema_inference_use_cache_for_file,
.schema_inference_mode = settings.schema_inference_mode,
.skip_empty_files = settings.engine_file_skip_empty_files,
.list_object_keys_size = 0,
.throw_on_zero_files_match = false,
.ignore_non_existent_file = false};
}

}
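fromAST() above accepts positional arguments in a fixed order: path, then format, then (when a structure argument is allowed) structure and compression, otherwise compression in third position. The sketch below restates that layout over plain strings instead of ASTs; the helper names are hypothetical and only illustrate the ordering, not the real parsing code.

#include <stdexcept>
#include <string>
#include <vector>

struct LocalArgs
{
    std::string path;
    std::string format = "auto";
    std::string structure = "auto";
    std::string compression = "auto";
};

LocalArgs parseLocalArgs(const std::vector<std::string> & args, bool with_structure)
{
    const size_t max_args = with_structure ? 4 : 3;
    if (args.empty() || args.size() > max_args)
        throw std::invalid_argument("expected 1.." + std::to_string(max_args) + " arguments");

    LocalArgs res;
    res.path = args[0];                     /// path is always first and mandatory
    if (args.size() > 1)
        res.format = args[1];               /// format is second
    if (with_structure)
    {
        if (args.size() > 2)
            res.structure = args[2];        /// structure only exists in the table-function form
        if (args.size() > 3)
            res.compression = args[3];
    }
    else if (args.size() > 2)
    {
        res.compression = args[2];          /// without structure, compression moves up
    }
    return res;
}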
src/Storages/ObjectStorage/Local/Configuration.h (new file, 52 lines)
@ -0,0 +1,52 @@
#pragma once

#include <memory>
#include "Disks/ObjectStorages/Local/LocalObjectStorage.h"

#include <Storages/ObjectStorage/StorageObjectStorage.h>

#include <filesystem>


namespace fs = std::filesystem;

namespace DB
{

class StorageLocalConfiguration : public StorageObjectStorage::Configuration
{
public:
using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr;

static constexpr auto type_name = "local";

StorageLocalConfiguration() = default;
StorageLocalConfiguration(const StorageLocalConfiguration & other) = default;

std::string getTypeName() const override { return type_name; }
std::string getEngineName() const override { return "Local"; }

Path getPath() const override { return path; }
void setPath(const Path & path_) override { path = path_; }

const Paths & getPaths() const override { return paths; }
void setPaths(const Paths & paths_) override { paths = paths_; }

String getNamespace() const override { return ""; }
String getDataSourceDescription() const override { return ""; }
StorageObjectStorage::QuerySettings getQuerySettings(const ContextPtr &) const override;

ConfigurationPtr clone() override { return std::make_shared<StorageLocalConfiguration>(*this); }

ObjectStoragePtr createObjectStorage(ContextPtr, bool) override { return std::make_shared<LocalObjectStorage>("/"); }

void addStructureAndFormatToArgs(ASTs &, const String &, const String &, ContextPtr) override { }

private:
void fromNamedCollection(const NamedCollection & collection, ContextPtr context) override;
void fromAST(ASTs & args, ContextPtr context, bool with_structure) override;
Path path;
Paths paths;
};

}
@ -465,6 +465,12 @@ SchemaCache & StorageObjectStorage::getSchemaCache(const ContextPtr & context, c
DEFAULT_SCHEMA_CACHE_ELEMENTS));
return schema_cache;
}
else if (storage_type_name == "local")
{
static SchemaCache schema_cache(
context->getConfigRef().getUInt("schema_inference_cache_max_elements_for_local", DEFAULT_SCHEMA_CACHE_ELEMENTS));
return schema_cache;
}
else
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported storage type: {}", storage_type_name);
}
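The new "local" branch above follows the same idiom as the other storage types: one function-local static cache per branch, constructed lazily on first use with a size read from configuration. A minimal, self-contained sketch of that idiom, with a placeholder Cache type and sizes rather than the real SchemaCache:

#include <cstddef>
#include <stdexcept>
#include <string>

struct Cache
{
    explicit Cache(size_t max_elements_) : max_elements(max_elements_) {}
    size_t max_elements;
};

Cache & getCacheFor(const std::string & storage_type)
{
    if (storage_type == "s3")
    {
        static Cache cache(1024);   /// constructed once, thread-safe initialization since C++11
        return cache;
    }
    if (storage_type == "local")
    {
        static Cache cache(1024);   /// a separate instance for the new branch
        return cache;
    }
    throw std::runtime_error("Unsupported storage type: " + storage_type);
}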
@ -162,7 +162,7 @@ public:
ContextPtr local_context,
bool with_table_structure);

/// Storage type: s3, hdfs, azure.
/// Storage type: s3, hdfs, azure, local.
virtual std::string getTypeName() const = 0;
/// Engine name: S3, HDFS, Azure.
virtual std::string getEngineName() const = 0;
@ -417,10 +417,7 @@ std::future<StorageObjectStorageSource::ReaderHolder> StorageObjectStorageSource
}

std::unique_ptr<ReadBuffer> StorageObjectStorageSource::createReadBuffer(
const ObjectInfo & object_info,
const ObjectStoragePtr & object_storage,
const ContextPtr & context_,
const LoggerPtr & log)
const ObjectInfo & object_info, const ObjectStoragePtr & object_storage, const ContextPtr & context_, const LoggerPtr & log)
{
const auto & object_size = object_info.metadata->size_bytes;

@ -44,10 +44,11 @@
#include <IO/HTTPHeaderEntries.h>

#include <algorithm>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeString.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Poco/Net/HTTPRequest.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeLowCardinality.h>

namespace ProfileEvents
{
@ -166,7 +167,19 @@ IStorageURLBase::IStorageURLBase(
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);

setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context_, getSampleURI(uri, context_), format_settings));
auto virtual_columns_desc = VirtualColumnUtils::getVirtualsForFileLikeStorage(
storage_metadata.columns, context_, getSampleURI(uri, context_), format_settings);
if (!storage_metadata.getColumns().has("_headers"))
{
virtual_columns_desc.addEphemeral(
"_headers",
std::make_shared<DataTypeMap>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>())),
"");
}

setVirtuals(virtual_columns_desc);
setInMemoryMetadata(storage_metadata);
}

@ -292,11 +305,13 @@ StorageURLSource::StorageURLSource(
const URIParams & params,
bool glob_url,
bool need_only_count_)
: SourceWithKeyCondition(info.source_header, false), WithContext(context_)
: SourceWithKeyCondition(info.source_header, false)
, WithContext(context_)
, name(std::move(name_))
, columns_description(info.columns_description)
, requested_columns(info.requested_columns)
, requested_virtual_columns(info.requested_virtual_columns)
, need_headers_virtual_column(info.requested_virtual_columns.contains("_headers"))
, requested_virtual_columns(info.requested_virtual_columns.eraseNames({"_headers"}))
, block_for_format(info.format_header)
, uri_iterator(uri_iterator_)
, format(format_)
@ -431,11 +446,28 @@ Chunk StorageURLSource::generate()

progress(num_rows, chunk_size ? chunk_size : chunk.bytes());
VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk(
chunk, requested_virtual_columns,
chunk,
requested_virtual_columns,
{
.path = curr_uri.getPath(),
.size = current_file_size,
}, getContext());
},
getContext());
chassert(dynamic_cast<ReadWriteBufferFromHTTP *>(read_buf.get()));
if (need_headers_virtual_column)
{
if (!http_response_headers_initialized)
{
http_response_headers = dynamic_cast<ReadWriteBufferFromHTTP *>(read_buf.get())->getResponseHeaders();
http_response_headers_initialized = true;
}

auto type = std::make_shared<DataTypeMap>(
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()),
std::make_shared<DataTypeLowCardinality>(std::make_shared<DataTypeString>()));

chunk.addColumn(type->createColumnConst(chunk.getNumRows(), http_response_headers)->convertToFullColumnIfConst());
}
return chunk;
}

@ -446,6 +478,7 @@ Chunk StorageURLSource::generate()
reader.reset();
input_format.reset();
read_buf.reset();
http_response_headers_initialized = false;
total_rows_in_file = 0;
}
return {};
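The generate() changes above read the HTTP response headers once per file, reuse the cached value for every chunk of that file, and clear the flag when the reader moves on to the next URI. A small sketch of that caching pattern, with a hypothetical HeaderMap and reader hook rather than the real ReadWriteBufferFromHTTP API:

#include <map>
#include <string>

using HeaderMap = std::map<std::string, std::string>;

class HeadersCache
{
public:
    const HeaderMap & get(const HeaderMap & fresh_headers_from_reader)
    {
        if (!initialized)
        {
            headers = fresh_headers_from_reader;   /// taken from the HTTP reader only once per file
            initialized = true;
        }
        return headers;                            /// appended to every chunk of the current file
    }

    void resetForNextFile() { initialized = false; }   /// mirrors clearing the flag when read_buf is reset

private:
    HeaderMap headers;
    bool initialized = false;
};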
Some files were not shown because too many files have changed in this diff.