Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)

Commit e514a8b49d: Merge remote-tracking branch 'origin/master' into try_global_trace_collector
@@ -96,7 +96,6 @@ Checks: [
     '-modernize-use-default-member-init',
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
     '-modernize-use-override',
     '-modernize-use-trailing-return-type',

     '-performance-inefficient-string-concatenation',
@@ -123,7 +123,6 @@
 * Something was wrong with Apache Hive, which is experimental and not supported. [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)).
 * An improvement for experimental parallel replicas: force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)).
 * Fix usage of plain metadata type with new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)).
 * Don't allow setting max_parallel_replicas to 0, as it doesn't make sense [#60430](https://github.com/ClickHouse/ClickHouse/pull/60430) ([Kruglov Pavel](https://github.com/Avogar)).
 * Try to fix the logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)).
 * Avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
@@ -314,13 +314,13 @@ static int read_unicode(json_stream *json)

        if (l < 0xdc00 || l > 0xdfff) {
            json_error(json, "invalid surrogate pair continuation \\u%04lx out "
-                      "of range (dc00-dfff)", l);
+                      "of range (dc00-dfff)", (unsigned long)l);
            return -1;
        }

        cp = ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000);
    } else if (cp >= 0xdc00 && cp <= 0xdfff) {
-        json_error(json, "dangling surrogate \\u%04lx", cp);
+        json_error(json, "dangling surrogate \\u%04lx", (unsigned long)cp);
        return -1;
    }

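For context, the expression `cp = ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000)` above combines a UTF-16 high/low surrogate pair into one code point. A minimal standalone sketch of the same arithmetic (the function name is illustrative, not part of the library):

```cpp
#include <cstdio>

// Map a UTF-16 surrogate pair (h in D800-DBFF, l in DC00-DFFF) to a code
// point, using the same arithmetic as read_unicode() above.
static unsigned long combine_surrogates(unsigned long h, unsigned long l)
{
    return ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000);
}

int main()
{
    // U+1F600 is encoded in UTF-16 as the surrogate pair D83D DE00.
    std::printf("U+%lX\n", combine_surrogates(0xd83d, 0xde00)); // prints U+1F600
}
```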
contrib/NuRaft (vendored)
@@ -1 +1 @@
-Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1
+Subproject commit 08ac76ea80a37f89b12109c805eafe9f1dc9b991
@@ -32,6 +32,7 @@ set(SRCS
    "${LIBRARY_DIR}/src/handle_custom_notification.cxx"
    "${LIBRARY_DIR}/src/handle_vote.cxx"
    "${LIBRARY_DIR}/src/launcher.cxx"
    "${LIBRARY_DIR}/src/log_entry.cxx"
    "${LIBRARY_DIR}/src/srv_config.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
@@ -79,7 +79,7 @@ The supported formats are:
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
-| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
+| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✗ |
 | [Native](#native) | ✔ | ✔ |
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
@@ -1487,7 +1487,7 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000
 - [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
 - [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
 - [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
-- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
+- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`.

 ## RowBinary {#rowbinary}
@@ -1642,7 +1642,7 @@ Possible values:
 - 0 — Output without row numbers.
 - 1 — Output with row numbers.

-Default value: `0`.
+Default value: `1`.

 **Example**
@@ -36,7 +36,7 @@ E.g. configuration option
     <s3>
         <type>s3</type>
         <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
     </s3>
 ```
@@ -47,7 +47,7 @@ is equal to configuration (from `24.1`):
     <object_storage_type>s3</object_storage_type>
     <metadata_type>local</metadata_type>
     <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-    <use_invironment_credentials>1</use_invironment_credentials>
+    <use_environment_credentials>1</use_environment_credentials>
 </s3>
 ```
@@ -56,7 +56,7 @@ Configuration
     <s3_plain>
         <type>s3_plain</type>
         <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
     </s3_plain>
 ```
@@ -67,7 +67,7 @@ is equal to
     <object_storage_type>s3</object_storage_type>
     <metadata_type>plain</metadata_type>
     <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-    <use_invironment_credentials>1</use_invironment_credentials>
+    <use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
@@ -79,7 +79,7 @@ Example of full storage configuration will look like:
     <s3>
         <type>s3</type>
         <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
     </s3>
 </disks>
 <policies>
@@ -105,7 +105,7 @@ Starting with 24.1 clickhouse version, it can also look like:
     <object_storage_type>s3</object_storage_type>
     <metadata_type>local</metadata_type>
     <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-    <use_invironment_credentials>1</use_invironment_credentials>
+    <use_environment_credentials>1</use_environment_credentials>
 </s3>
 </disks>
 <policies>
@@ -324,7 +324,7 @@ Configuration:
     <s3_plain>
         <type>s3_plain</type>
         <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
     </s3_plain>
 ```
@@ -337,7 +337,7 @@ Configuration:
     <object_storage_type>azure</object_storage_type>
     <metadata_type>plain</metadata_type>
     <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-    <use_invironment_credentials>1</use_invironment_credentials>
+    <use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
|
@ -483,7 +483,7 @@ Where:
|
||||
|
||||
- `r1`- the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
|
||||
- `r2`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
|
||||
- `r3`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
|
||||
- `r3`- the number of unique visitors who visited the site during a specific time period on 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
|
||||
|
||||
## uniqUpTo(N)(x)
|
||||
|
||||
|
@@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t

 The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn’t explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter.

-ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.
+ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.

-When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting.
+When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting.

 ## Examples
@@ -147,8 +147,8 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
 - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
 - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
 - [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
 - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
 - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
 - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
|
@ -395,3 +395,37 @@ SELECT v, variantType(v) FROM test ORDER by v;
|
||||
│ 100 │ UInt32 │
|
||||
└─────┴────────────────┘
|
||||
```
|
||||
|
||||
## JSONExtract functions with Variant
|
||||
|
||||
All `JSONExtract*` functions support `Variant` type:
|
||||
|
||||
```sql
|
||||
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Variant(UInt32, String, Array(UInt32))') AS variant, variantType(variant) AS variant_type;
|
||||
```
|
||||
|
||||
```text
|
||||
┌─variant─┬─variant_type──┐
|
||||
│ [1,2,3] │ Array(UInt32) │
|
||||
└─────────┴───────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) AS map_of_variant_types
|
||||
```
|
||||
|
||||
```text
|
||||
┌─map_of_variants──────────────────┬─map_of_variant_types────────────────────────────┐
|
||||
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
|
||||
└──────────────────────────────────┴─────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
```sql
|
||||
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS variants, arrayMap(x -> (x.1, variantType(x.2)), variants) AS variant_types
|
||||
```
|
||||
|
||||
```text
|
||||
┌─variants───────────────────────────────┬─variant_types─────────────────────────────────────────┐
|
||||
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
|
||||
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
@ -99,7 +99,7 @@ Alias: `OCTET_LENGTH`
|
||||
Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||
|
||||
Alias:
|
||||
- `CHAR_LENGTH``
|
||||
- `CHAR_LENGTH`
|
||||
- `CHARACTER_LENGTH`
|
||||
|
||||
## leftPad
|
||||
|
@@ -2776,7 +2776,7 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
 - 0 — Row numbers are not output.
 - 1 — Row numbers are output.

-Default value: `0`.
+Default value: `1`.

 **Example**
@@ -476,7 +476,7 @@ FROM

 - `r1` - the number of unique visitors during 2020-01-01 (`cond1`).
 - `r2` - the number of unique visitors in the period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2`).
-- `r3` - the number of unique visitors in the period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3`).
+- `r3` - the number of unique visitors on 2020-01-01 and 2020-01-03 (`cond1` and `cond3`).

 ## uniqUpTo(N)(x) {#uniquptonx}
@@ -120,7 +120,7 @@ FROM dt
 - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
 - [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
 - [The `date_time_input_format` setting](../../operations/settings/index.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/index.md)
+- [The `date_time_output_format` setting](../../operations/settings/index.md#settings-date_time_output_format)
 - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
 - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
 - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
@@ -472,7 +472,7 @@ FROM

 - `r1` - the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
 - `r2` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (the `cond1` and `cond2` conditions).
-- `r3` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).
+- `r3` - the number of unique visitors who visited the site on 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).

 ## uniqUpTo(N)(x) {#uniquptonx}
@@ -17,12 +17,13 @@

 #include <Access/AccessControl.h>

-#include <Common/config_version.h>
-#include <Common/Exception.h>
-#include <Common/formatReadable.h>
-#include <Common/TerminalSize.h>
 #include <Common/Config/ConfigProcessor.h>
 #include <Common/Config/getClientConfigPath.h>
+#include <Common/CurrentThread.h>
+#include <Common/Exception.h>
+#include <Common/TerminalSize.h>
+#include <Common/config_version.h>
+#include <Common/formatReadable.h>

 #include <Columns/ColumnString.h>
 #include <Poco/Util/Application.h>
@@ -237,7 +237,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
        ASTPtr res = parseQueryAndMovePosition(
            parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);

-        std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
+        std::unique_ptr<ReadBuffer> insert_query_payload;
        /// If the query is INSERT ... VALUES, then we will try to parse the data.
        if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
        {
@@ -35,7 +35,7 @@ public:
    ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib

 private:
-    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder = nullptr;
+    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder;
    Container strings_holder;
 };

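Both this hunk and the `insert_query_payload` one above drop a redundant `= nullptr` initializer; a default-constructed `std::unique_ptr` is already null, as this small check illustrates:

```cpp
#include <cassert>
#include <memory>

int main()
{
    std::unique_ptr<int[]> implicit_null;            // default-constructed: null
    std::unique_ptr<int[]> explicit_null = nullptr;  // equivalent, just more verbose
    assert(!implicit_null && !explicit_null);
}
```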
@@ -96,7 +96,7 @@
        <to>https://{bucket}.s3.amazonaws.com</to>
    </s3>
    <gs>
-        <to>https://{bucket}.storage.googleapis.com</to>
+        <to>https://storage.googleapis.com/{bucket}</to>
    </gs>
    <oss>
        <to>https://{bucket}.oss.aliyuncs.com</to>
@@ -1,6 +1,8 @@
 #include <Access/AccessRights.h>
-#include <Common/logger_useful.h>
 #include <base/sort.h>
+#include <Common/Exception.h>
+#include <Common/logger_useful.h>
+
 #include <boost/container/small_vector.hpp>
 #include <boost/range/adaptor/map.hpp>
 #include <unordered_map>
@@ -115,34 +115,34 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
-        this->data(place).add(*columns[0], row_num, arena);
+        data(place).add(*columns[0], row_num, arena);
    }

    void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
    {
-        this->data(place).addManyDefaults(*columns[0], 0, arena);
+        data(place).addManyDefaults(*columns[0], 0, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        this->data(place).add(this->data(rhs), arena);
+        data(place).add(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).write(buf, *serialization);
+        data(place).write(buf, *serialization);
    }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
-        this->data(place).read(buf, *serialization, arena);
+        data(place).read(buf, *serialization, arena);
    }

    bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
-        this->data(place).insertResultInto(to);
+        data(place).insertResultInto(to);
    }
 };
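The same `this->data(place)` to `data(place)` rewrite repeats across the aggregate-function hunks that follow. As background: inside a class template, a name inherited from a dependent base is not found by unqualified lookup, which is why `this->` was there in the first place; dropping it compiles only when the name is made visible another way. A simplified illustration of that language rule (the types here are invented for the sketch, not ClickHouse's real hierarchy):

```cpp
#include <cstdio>

template <typename T>
struct Base
{
    T & data(T & place) const { return place; }
};

template <typename T>
struct Derived : Base<T>
{
    using Base<T>::data; // makes unqualified data(...) well-formed in a template

    void touch(T & place) const { std::printf("%d\n", data(place)); }
};

int main()
{
    Derived<int> d;
    int value = 42;
    d.touch(value); // prints 42
}
```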
@@ -1,11 +1,11 @@
 #include <AggregateFunctions/AggregateFunctionFactory.h>
 #include <AggregateFunctions/Combinators/AggregateFunctionCombinatorFactory.h>

 #include <DataTypes/DataTypeLowCardinality.h>
 #include <DataTypes/DataTypesNumber.h>
 #include <Functions/FunctionFactory.h>
 #include <IO/WriteHelpers.h>
 #include <Interpreters/Context.h>
 #include <Common/CurrentThread.h>

 static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;
@@ -559,7 +559,7 @@ public:
            ptr = ptrs[row_num];
        }

-        this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
+        data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
    }

    void addManyDefaults(
@@ -572,7 +572,7 @@ public:

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        this->data(place).merge(this->data(rhs), arena);
+        data(place).merge(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override
@@ -590,7 +590,7 @@ public:
        auto & array = assert_cast<ColumnArray &>(to);
        auto & str = assert_cast<ColumnString &>(array.getData());

-        this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
+        data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);

        array.getOffsets().push_back(str.size());
    }
@@ -89,10 +89,10 @@ struct GroupArraySamplerData
        chassert(lim != 0);

        /// With a large number of values, we will generate random numbers several times slower.
-        if (lim <= static_cast<UInt64>(rng.max()))
+        if (lim <= static_cast<UInt64>(pcg32_fast::max()))
            return rng() % lim;
        else
-            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
+            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
    }

    void randomShuffle()
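The rule touched here draws a uniform value below `lim` from a 32-bit generator: one draw suffices when the limit fits the engine's range, otherwise two draws are combined into a 64-bit value (hi * 2^32 + lo). A standalone sketch with std::mt19937 standing in for the vendored pcg32_fast (both are 32-bit engines with a static max()):

```cpp
#include <cstdint>
#include <random>

uint64_t genRandomBelow(std::mt19937 & rng, uint64_t lim)
{
    if (lim <= static_cast<uint64_t>(std::mt19937::max()))
        return rng() % lim; // one 32-bit draw is enough
    // Combine two draws into a 64-bit value: hi * 2^32 + lo.
    return (static_cast<uint64_t>(rng()) * (static_cast<uint64_t>(std::mt19937::max()) + 1ULL)
            + static_cast<uint64_t>(rng())) % lim;
}

int main()
{
    std::mt19937 rng(42);
    return static_cast<int>(genRandomBelow(rng, 10)); // some value in [0, 10)
}
```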
@@ -242,7 +242,7 @@ public:
    {
        Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type);
        Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type);
-        this->data(place).add(x, y, arena);
+        data(place).add(x, y, arena);
    }

    Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const
@@ -264,25 +264,25 @@ public:

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

        a.merge(b, arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).write(buf);
+        data(place).write(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
    {
-        auto res = this->data(place).getResult(total_buckets, arena);
+        auto res = data(place).getResult(total_buckets, arena);

        auto & col = assert_cast<ColumnArray &>(to);
        auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
@@ -205,35 +205,35 @@ public:
        UInt8 is_second = columns[1]->getUInt(row_num);

        if (is_second)
-            this->data(place).addY(value, arena);
+            data(place).addY(value, arena);
        else
-            this->data(place).addX(value, arena);
+            data(place).addX(value, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

        a.merge(b, arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).write(buf);
+        data(place).write(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
-        if (!this->data(place).size_x || !this->data(place).size_y)
+        if (!data(place).size_x || !data(place).size_y)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());

-        auto [u_statistic, p_value] = this->data(place).getResult(alternative, continuity_correction);
+        auto [u_statistic, p_value] = data(place).getResult(alternative, continuity_correction);

        /// Because p-value is a probability.
        p_value = std::min(1.0, std::max(0.0, p_value));
@@ -66,31 +66,31 @@ public:
    {
        Float64 new_x = columns[0]->getFloat64(row_num);
        Float64 new_y = columns[1]->getFloat64(row_num);
-        this->data(place).addX(new_x, arena);
-        this->data(place).addY(new_y, arena);
+        data(place).addX(new_x, arena);
+        data(place).addY(new_y, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

        a.merge(b, arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).write(buf);
+        data(place).write(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
-        auto answer = this->data(place).getResult();
+        auto answer = data(place).getResult();

        auto & column = static_cast<ColumnVector<Float64> &>(to);
        column.getData().push_back(answer);
@@ -102,24 +102,24 @@ public:
            auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
            if (event)
            {
-                this->data(place).add(i);
+                data(place).add(i);
            }
        }
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
-        this->data(place).merge(this->data(rhs));
+        data(place).merge(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -130,13 +130,13 @@ public:
        ColumnArray::Offset current_offset = data_to.size();
        data_to.resize(current_offset + events_size);

-        const bool first_flag = this->data(place).events.test(0);
+        const bool first_flag = data(place).events.test(0);
        data_to[current_offset] = first_flag;
        ++current_offset;

        for (size_t i = 1; i < events_size; ++i)
        {
-            data_to[current_offset] = (first_flag && this->data(place).events.test(i));
+            data_to[current_offset] = (first_flag && data(place).events.test(i));
            ++current_offset;
        }

@@ -123,22 +123,22 @@ public:
        Float64 x = columns[0]->getFloat64(row_num);
        Float64 y = columns[1]->getFloat64(row_num);

-        this->data(place).add(x, y);
+        data(place).add(x, y);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
-        this->data(place).merge(this->data(rhs));
+        data(place).merge(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
    }

    static DataTypePtr createResultType()
@@ -168,8 +168,8 @@ public:
        IColumn & to,
        Arena *) const override
    {
-        Float64 k = this->data(place).getK();
-        Float64 b = this->data(place).getB(k);
+        Float64 k = data(place).getK();
+        Float64 b = data(place).getB(k);

        auto & col_tuple = assert_cast<ColumnTuple &>(to);
        auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0));
@@ -120,7 +120,7 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
-        this->data(place).add(*columns[0], row_num, arena);
+        data(place).add(*columns[0], row_num, arena);
    }

    void addBatchSinglePlace(
@@ -131,7 +131,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override
    {
-        if (this->data(place).isNull())
+        if (data(place).isNull())
            return;
        IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace(
            row_begin, row_end, place, columns, arena, if_argument_pos);
@@ -146,7 +146,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override
    {
-        if (this->data(place).isNull())
+        if (data(place).isNull())
            return;
        IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull(
            row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
@@ -154,29 +154,29 @@ public:

    void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
    {
-        this->data(place).add(*columns[0], 0, arena);
+        data(place).add(*columns[0], 0, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
-        this->data(place).add(this->data(rhs), arena);
+        data(place).add(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).write(buf, *serialization);
+        data(place).write(buf, *serialization);
    }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
-        this->data(place).read(buf, *serialization, arena);
+        data(place).read(buf, *serialization, arena);
    }

    bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
-        this->data(place).insertResultInto(to);
+        data(place).insertResultInto(to);
    }
 };
@@ -150,13 +150,13 @@ private:

    Float64 getResult(ConstAggregateDataPtr __restrict place) const
    {
-        const auto & data = this->data(place);
+        const auto & dt = data(place);
        switch (kind)
        {
-            case VarKind::varSampStable: return getVarSamp(data.m2, data.count);
-            case VarKind::stddevSampStable: return getStddevSamp(data.m2, data.count);
-            case VarKind::varPopStable: return getVarPop(data.m2, data.count);
-            case VarKind::stddevPopStable: return getStddevPop(data.m2, data.count);
+            case VarKind::varSampStable: return getVarSamp(dt.m2, dt.count);
+            case VarKind::stddevSampStable: return getStddevSamp(dt.m2, dt.count);
+            case VarKind::varPopStable: return getVarPop(dt.m2, dt.count);
+            case VarKind::stddevPopStable: return getStddevPop(dt.m2, dt.count);
        }
    }

@@ -182,22 +182,22 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
-        this->data(place).update(*columns[0], row_num);
+        data(place).update(*columns[0], row_num);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
-        this->data(place).mergeWith(this->data(rhs));
+        data(place).mergeWith(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -491,7 +491,7 @@ public:
        std::vector<const UInt8 *> nullable_filters;
        const IColumn * nested_columns[number_of_arguments];

-        std::unique_ptr<UInt8[]> final_flags = nullptr;
+        std::unique_ptr<UInt8[]> final_flags;
        const UInt8 * final_flags_ptr = nullptr;

        if (if_argument_pos >= 0)
@@ -258,10 +258,10 @@ private:
        chassert(limit > 0);

        /// With a large number of values, we will generate random numbers several times slower.
-        if (limit <= static_cast<UInt64>(rng.max()))
+        if (limit <= static_cast<UInt64>(pcg32_fast::max()))
            return rng() % limit;
        else
-            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
+            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
    }

    void sortIfNeeded()
@@ -1940,8 +1940,7 @@ std::vector<String> QueryAnalyzer::collectIdentifierTypoHints(const Identifier &
    for (const auto & valid_identifier : valid_identifiers)
        prompting_strings.push_back(valid_identifier.getFullName());

-    NamePrompter<1> prompter;
-    return prompter.getHints(unresolved_identifier.getFullName(), prompting_strings);
+    return NamePrompter<1>::getHints(unresolved_identifier.getFullName(), prompting_strings);
 }

 /** Wrap expression node in tuple element function calls for nested paths.
@@ -444,8 +444,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
            nulls_sort_direction = order_by_element.nulls_direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING;

        std::shared_ptr<Collator> collator;
-        if (order_by_element.collation)
-            collator = std::make_shared<Collator>(order_by_element.collation->as<ASTLiteral &>().value.get<String &>());
+        if (order_by_element.getCollation())
+            collator = std::make_shared<Collator>(order_by_element.getCollation()->as<ASTLiteral &>().value.get<String &>());

        const auto & sort_expression_ast = order_by_element.children.at(0);
        auto sort_expression = buildExpression(sort_expression_ast, context);
@@ -455,12 +455,12 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
            std::move(collator),
            order_by_element.with_fill);

-        if (order_by_element.fill_from)
-            sort_node->getFillFrom() = buildExpression(order_by_element.fill_from, context);
-        if (order_by_element.fill_to)
-            sort_node->getFillTo() = buildExpression(order_by_element.fill_to, context);
-        if (order_by_element.fill_step)
-            sort_node->getFillStep() = buildExpression(order_by_element.fill_step, context);
+        if (order_by_element.getFillFrom())
+            sort_node->getFillFrom() = buildExpression(order_by_element.getFillFrom(), context);
+        if (order_by_element.getFillTo())
+            sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context);
+        if (order_by_element.getFillStep())
+            sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context);

        list_node->getNodes().push_back(std::move(sort_node));
    }
@@ -558,7 +558,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression, co
    }
    else if (const auto * function = expression->as<ASTFunction>())
    {
-        if (function->is_lambda_function)
+        if (function->is_lambda_function || isASTLambdaFunction(*function))
        {
            const auto & lambda_arguments_and_expression = function->arguments->as<ASTExpressionList &>().children;
            auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as<ASTFunction &>();
@@ -1,14 +1,15 @@
 #pragma once

-#include <Core/Settings.h>
+#include <Core/Block.h>

-#include <DataTypes/IDataType.h>
-
 #include <QueryPipeline/SizeLimits.h>
+#include <memory>

 namespace DB
 {

+class IDataType;
+using DataTypePtr = std::shared_ptr<const IDataType>;
+
 class Set;
 using SetPtr = std::shared_ptr<Set>;
@@ -120,17 +120,18 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const

    result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value();

-    result->with_fill = with_fill;
-    result->fill_from = hasFillFrom() ? getFillFrom()->toAST(options) : nullptr;
-    result->fill_to = hasFillTo() ? getFillTo()->toAST(options) : nullptr;
-    result->fill_step = hasFillStep() ? getFillStep()->toAST(options) : nullptr;
    result->children.push_back(getExpression()->toAST(options));

    if (collator)
-    {
-        result->children.push_back(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
-        result->collation = result->children.back();
-    }
+        result->setCollation(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
+
+    result->with_fill = with_fill;
+    if (hasFillFrom())
+        result->setFillFrom(getFillFrom()->toAST(options));
+    if (hasFillTo())
+        result->setFillTo(getFillTo()->toAST(options));
+    if (hasFillStep())
+        result->setFillStep(getFillStep()->toAST(options));

    return result;
 }
@@ -124,11 +124,12 @@ BackupReaderS3::BackupReaderS3(
    bool allow_s3_native_copy,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
-    const ContextPtr & context_)
+    const ContextPtr & context_,
+    bool is_internal_backup)
    : BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderS3"))
    , s3_uri(s3_uri_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
-    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
+    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
 {
    auto & request_settings = s3_settings.request_settings;
    request_settings.updateFromSettings(context_->getSettingsRef());
@@ -214,11 +215,12 @@ BackupWriterS3::BackupWriterS3(
    const String & storage_class_name,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
-    const ContextPtr & context_)
+    const ContextPtr & context_,
+    bool is_internal_backup)
    : BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterS3"))
    , s3_uri(s3_uri_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
-    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
+    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
 {
    auto & request_settings = s3_settings.request_settings;
    request_settings.updateFromSettings(context_->getSettingsRef());
@@ -18,7 +18,15 @@ namespace DB
 class BackupReaderS3 : public BackupReaderDefault
 {
 public:
-    BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
+    BackupReaderS3(
+        const S3::URI & s3_uri_,
+        const String & access_key_id_,
+        const String & secret_access_key_,
+        bool allow_s3_native_copy,
+        const ReadSettings & read_settings_,
+        const WriteSettings & write_settings_,
+        const ContextPtr & context_,
+        bool is_internal_backup);
    ~BackupReaderS3() override;

    bool fileExists(const String & file_name) override;
@@ -41,7 +49,16 @@ private:
 class BackupWriterS3 : public BackupWriterDefault
 {
 public:
-    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
+    BackupWriterS3(
+        const S3::URI & s3_uri_,
+        const String & access_key_id_,
+        const String & secret_access_key_,
+        bool allow_s3_native_copy,
+        const String & storage_class_name,
+        const ReadSettings & read_settings_,
+        const WriteSettings & write_settings_,
+        const ContextPtr & context_,
+        bool is_internal_backup);
    ~BackupWriterS3() override;

    bool fileExists(const String & file_name) override;
@@ -940,6 +940,7 @@ void BackupsWorker::doRestore(
    backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
    backup_open_params.read_settings = getReadSettingsForRestore(context);
    backup_open_params.write_settings = getWriteSettingsForRestore(context);
+    backup_open_params.is_internal_backup = restore_settings.internal;
    BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);

    String current_database = context->getCurrentDatabase();
@@ -110,7 +110,8 @@ void registerBackupEngineS3(BackupFactory & factory)
            params.allow_s3_native_copy,
            params.read_settings,
            params.write_settings,
-            params.context);
+            params.context,
+            params.is_internal_backup);

        return std::make_unique<BackupImpl>(
            params.backup_info,
@@ -129,7 +130,8 @@ void registerBackupEngineS3(BackupFactory & factory)
            params.s3_storage_class,
            params.read_settings,
            params.write_settings,
-            params.context);
+            params.context,
+            params.is_internal_backup);

        return std::make_unique<BackupImpl>(
            params.backup_info,
@@ -207,11 +207,17 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
        max_entries = nested_pools.size();
    }
    else if (pool_mode == PoolMode::GET_ONE)
+    {
        max_entries = 1;
+    }
    else if (pool_mode == PoolMode::GET_MANY)
+    {
        max_entries = settings.max_parallel_replicas;
+    }
    else
+    {
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode");
+    }

    if (!priority_func)
        priority_func = makeGetPriorityFunc(settings);
@@ -82,7 +82,7 @@ std::vector<Connection *> HedgedConnectionsFactory::getManyConnections(PoolMode
        }
        case PoolMode::GET_MANY:
        {
-            max_entries = max_parallel_replicas;
+            max_entries = std::min(max_parallel_replicas, shuffled_pools.size());
            break;
        }
    }
@@ -158,7 +158,7 @@ private:
    /// checking the number of requested replicas that are still in process).
    size_t requested_connections_count = 0;

-    const size_t max_parallel_replicas = 0;
+    const size_t max_parallel_replicas = 1;
    const bool skip_unavailable_shards = false;
 };

@@ -346,7 +346,7 @@ ColumnPtr ColumnSparse::filter(const Filter & filt, ssize_t) const
    }

    auto res_values = values->filter(values_filter, values_result_size_hint);
-    return this->create(res_values, std::move(res_offsets), res_offset);
+    return create(res_values, std::move(res_offsets), res_offset);
 }

 void ColumnSparse::expand(const Filter & mask, bool inverted)
@@ -671,7 +671,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
            ReadableSize(rss),
            ReadableSize(difference));

-        total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas);
+        MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
    }
 }

@@ -2,10 +2,13 @@

 #include <memory>

+#include <base/types.h>
+#include <Common/Logger.h>
-#include <Common/SharedMutex.h>
+#include <Common/SharedLockGuard.h>
+#include <Common/SharedMutex.h>

 namespace DB
 {

 /** AtomicLogger allows to atomically change logger.
  * Standard library does not have atomic_shared_ptr, and we do not use std::atomic* operations,
@@ -49,3 +52,5 @@ private:
    mutable DB::SharedMutex log_mutex;
    LoggerPtr logger;
 };
+
+}
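The header comment above explains the design: the standard library lacks an atomic shared_ptr that fits here, so readers copy the pointer under a shared lock and writers swap it under an exclusive one. A minimal sketch of that pattern using standard types (std::shared_mutex stands in for ClickHouse's SharedMutex/SharedLockGuard):

```cpp
#include <memory>
#include <mutex>
#include <shared_mutex>

template <typename Logger>
class AtomicLoggerSketch
{
public:
    void store(std::shared_ptr<Logger> new_logger)
    {
        std::unique_lock lock(mutex); // exclusive: writers swap the pointer
        logger = std::move(new_logger);
    }

    std::shared_ptr<Logger> load() const
    {
        std::shared_lock lock(mutex); // shared: readers copy the pointer
        return logger;
    }

private:
    mutable std::shared_mutex mutex;
    std::shared_ptr<Logger> logger;
};
```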
src/Common/CurrentThreadHelpers.cpp (new file)
@@ -0,0 +1,16 @@
+#include <Common/CurrentThread.h>
+#include <Common/CurrentThreadHelpers.h>
+
+namespace DB
+{
+
+bool currentThreadHasGroup()
+{
+    return DB::CurrentThread::getGroup() != nullptr;
+}
+
+LogsLevel currentThreadLogsLevel()
+{
+    return DB::CurrentThread::get().getClientLogsLevel();
+}
+}
src/Common/CurrentThreadHelpers.h (new file)
@@ -0,0 +1,9 @@
+#pragma once
+
+#include <Core/LogsLevel.h>
+
+namespace DB
+{
+bool currentThreadHasGroup();
+LogsLevel currentThreadLogsLevel();
+}
@@ -1,13 +1,15 @@
 #include "DateLUT.h"

+#include <Interpreters/Context.h>
+#include <Common/CurrentThread.h>
+#include <Common/filesystemHelpers.h>
+
 #include <Poco/DigestStream.h>
 #include <Poco/Exception.h>
 #include <Poco/SHA1Engine.h>
-#include <Common/filesystemHelpers.h>

 #include <filesystem>
 #include <fstream>
-#include <Interpreters/Context.h>


 namespace
@@ -140,6 +142,38 @@ std::string determineDefaultTimeZone()

 }

+const DateLUTImpl & DateLUT::instance()
+{
+    const auto & date_lut = getInstance();
+
+    if (DB::CurrentThread::isInitialized())
+    {
+        std::string timezone_from_context;
+        const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();
+
+        if (query_context)
+        {
+            timezone_from_context = extractTimezoneFromContext(query_context);
+
+            if (!timezone_from_context.empty())
+                return date_lut.getImplementation(timezone_from_context);
+        }
+
+        /// On the server side, timezone is passed in query_context,
+        /// but on CH-client side we have no query context,
+        /// and each time we modify client's global context
+        const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
+        if (global_context)
+        {
+            timezone_from_context = extractTimezoneFromContext(global_context);
+
+            if (!timezone_from_context.empty())
+                return date_lut.getImplementation(timezone_from_context);
+        }
+    }
+    return serverTimezoneInstance();
+}
+
 DateLUT::DateLUT()
 {
    /// Initialize the pointer to the default DateLUTImpl.
@@ -1,17 +1,23 @@
 #pragma once

-#include "DateLUTImpl.h"
-
 #include <base/defines.h>
 #include <base/types.h>

 #include <boost/noncopyable.hpp>
-#include "Common/CurrentThread.h"

 #include <atomic>
 #include <memory>
 #include <mutex>
 #include <unordered_map>

+namespace DB
+{
+class Context;
+using ContextPtr = std::shared_ptr<const Context>;
+}
+
+class DateLUTImpl;
+
+
 /// This class provides lazy initialization and lookup of singleton DateLUTImpl objects for a given timezone.
 class DateLUT : private boost::noncopyable
@@ -20,38 +26,7 @@ public:
    /// Return DateLUTImpl instance for session timezone.
    /// session_timezone is a session-level setting.
    /// If setting is not set, returns the server timezone.
-    static ALWAYS_INLINE const DateLUTImpl & instance()
-    {
-        const auto & date_lut = getInstance();
-
-        if (DB::CurrentThread::isInitialized())
-        {
-            std::string timezone_from_context;
-            const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();
-
-            if (query_context)
-            {
-                timezone_from_context = extractTimezoneFromContext(query_context);
-
-                if (!timezone_from_context.empty())
-                    return date_lut.getImplementation(timezone_from_context);
-            }
-
-            /// On the server side, timezone is passed in query_context,
-            /// but on CH-client side we have no query context,
-            /// and each time we modify client's global context
-            const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
-            if (global_context)
-            {
-                timezone_from_context = extractTimezoneFromContext(global_context);
-
-                if (!timezone_from_context.empty())
-                    return date_lut.getImplementation(timezone_from_context);
-            }
-
-        }
-        return serverTimezoneInstance();
-    }
+    static const DateLUTImpl & instance();

    static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone)
    {
@@ -1,8 +1,5 @@
-#include "DateLUTImpl.h"
-
-#include <cctz/civil_time.h>
-#include <cctz/time_zone.h>
-#include <cctz/zone_info_source.h>
+#include <Core/DecimalFunctions.h>
+#include <Common/DateLUTImpl.h>
 #include <Common/Exception.h>

 #include <algorithm>
@@ -11,6 +8,10 @@
 #include <cstring>
 #include <memory>

+#include <cctz/civil_time.h>
+#include <cctz/time_zone.h>
+#include <cctz/zone_info_source.h>
+

 namespace DB
 {
@@ -214,6 +215,29 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
    }
 }

+unsigned int DateLUTImpl::toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const
+{
+    constexpr Int64 millisecond_multiplier = 1'000;
+    constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
+    constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;
+
+    auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);
+
+    if (datetime.value < 0 && components.fractional)
+    {
+        components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
+        --components.whole;
+    }
+    Int64 fractional = components.fractional;
+    if (scale_multiplier > microsecond_multiplier)
+        fractional = fractional / (scale_multiplier / microsecond_multiplier);
+    else if (scale_multiplier < microsecond_multiplier)
+        fractional = fractional * (microsecond_multiplier / scale_multiplier);
+
+    UInt16 millisecond = static_cast<UInt16>(fractional / divider);
+    return millisecond;
+}
+
+
 /// Prefer to load timezones from blobs linked to the binary.
 /// The blobs are provided by "tzdata" library.
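The new out-of-line function normalizes the fractional part of a `DateTime64` tick to microseconds before taking milliseconds. A plain-integer sketch of just that scaling step, with illustrative values (no ClickHouse types, and the negative-value branch omitted):

```cpp
#include <cstdint>
#include <cstdio>

uint16_t toMillisecondSketch(int64_t fractional, int64_t scale_multiplier)
{
    constexpr int64_t microsecond_multiplier = 1'000'000;
    constexpr int64_t divider = 1'000; // microseconds per millisecond

    if (scale_multiplier > microsecond_multiplier)      // e.g. DateTime64(9), nanosecond ticks
        fractional = fractional / (scale_multiplier / microsecond_multiplier);
    else if (scale_multiplier < microsecond_multiplier) // e.g. DateTime64(3), millisecond ticks
        fractional = fractional * (microsecond_multiplier / scale_multiplier);

    return static_cast<uint16_t>(fractional / divider);
}

int main()
{
    std::printf("%u\n", toMillisecondSketch(123, 1'000));               // .123 s -> 123 ms
    std::printf("%u\n", toMillisecondSketch(123456789, 1'000'000'000)); // .123456789 s -> 123 ms
}
```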
@@ -3,7 +3,6 @@
 #include <base/DayNum.h>
 #include <base/defines.h>
 #include <base/types.h>
-#include <Core/DecimalFunctions.h>

 #include <ctime>
 #include <cassert>
@@ -50,6 +49,11 @@ enum class WeekDayMode
    WeekStartsSunday1 = 3
 };

+namespace DB
+{
+class DateTime64;
+}
+
 /** Lookup table to conversion of time to date, and to month / year / day of week / day of month and so on.
  * First time was implemented for OLAPServer, that needed to do billions of such transformations.
  */
|
||||
return time % 60;
|
||||
}
|
||||
|
||||
template <typename DateOrTime>
|
||||
unsigned toMillisecond(const DateOrTime & datetime, Int64 scale_multiplier) const
|
||||
{
|
||||
constexpr Int64 millisecond_multiplier = 1'000;
|
||||
constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
|
||||
constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;
|
||||
|
||||
auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);
|
||||
|
||||
if (datetime.value < 0 && components.fractional)
|
||||
{
|
||||
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
|
||||
--components.whole;
|
||||
}
|
||||
Int64 fractional = components.fractional;
|
||||
if (scale_multiplier > microsecond_multiplier)
|
||||
fractional = fractional / (scale_multiplier / microsecond_multiplier);
|
||||
else if (scale_multiplier < microsecond_multiplier)
|
||||
fractional = fractional * (microsecond_multiplier / scale_multiplier);
|
||||
|
||||
UInt16 millisecond = static_cast<UInt16>(fractional / divider);
|
||||
return millisecond;
|
||||
}
|
||||
unsigned toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const;
|
||||
|
||||
unsigned toMinute(Time t) const
|
||||
{
|
||||
|
@@ -1,26 +1,27 @@
 #include "Exception.h"

-#include <algorithm>
-#include <cstdlib>
-#include <cstring>
-#include <filesystem>
-#include <cxxabi.h>
 #include <IO/Operators.h>
 #include <IO/ReadBufferFromFile.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/ReadHelpers.h>
 #include <IO/WriteHelpers.h>
 #include <base/demangle.h>
-#include <Poco/String.h>
+#include <Common/AtomicLogger.h>
 #include <Common/ErrorCodes.h>
 #include <Common/Exception.h>
 #include <Common/LockMemoryExceptionInThread.h>
 #include <Common/MemorySanitizer.h>
 #include <Common/SensitiveDataMasker.h>
+#include <Common/config_version.h>
 #include <Common/filesystemHelpers.h>
 #include <Common/formatReadable.h>
 #include <Common/logger_useful.h>

-#include <Common/config_version.h>
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+#include <filesystem>
+#include <cxxabi.h>
+
+#include <Poco/String.h>

 namespace fs = std::filesystem;
@ -1,22 +1,20 @@
#pragma once

#include <cerrno>
#include <exception>
#include <vector>
#include <memory>

#include <Poco/Exception.h>

#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/int8_to_string.h>
#include <base/scope_guard.h>
#include <Common/AtomicLogger.h>
#include <Common/Logger.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/StackTrace.h>

#include <cerrno>
#include <exception>
#include <memory>
#include <vector>

#include <fmt/format.h>
#include <Poco/Exception.h>


namespace Poco { class Logger; }
@ -24,6 +22,8 @@ namespace Poco { class Logger; }
namespace DB
{

class AtomicLogger;

[[noreturn]] void abortOnFailedAssertion(const String & description);

/// This flag can be set for testing purposes - to check that no exceptions are thrown.
@ -203,7 +203,7 @@ public:
    if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until)
    {
        LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit);
        mute_warning_until = roundUp(total_connections_in_group, limits.warning_step);
        mute_warning_until = roundUp(total_connections_in_group, HTTPConnectionPools::Limits::warning_step);
    }
}

@ -295,8 +295,13 @@ private:
String getTarget() const
{
    if (!Session::getProxyConfig().host.empty())
        return fmt::format("{} over proxy {}", Session::getHost(), Session::getProxyConfig().host);
    return Session::getHost();
        return fmt::format("{}:{} over proxy {}",
                           Session::getHost(),
                           Session::getPort(),
                           Session::getProxyConfig().host);
    return fmt::format("{}:{}",
                       Session::getHost(),
                       Session::getPort());
}

void flushRequest() override
@ -472,7 +477,8 @@ public:
String getTarget() const
{
    if (!proxy_configuration.isEmpty())
        return fmt::format("{} over proxy {}", host, proxy_configuration.host);
        return fmt::format("{} over proxy {}",
                           host, proxy_configuration.host);
    return host;
}

@ -207,7 +207,7 @@ public:
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
{
    DB::PrefetchingHelper prefetching;
    size_t prefetch_look_ahead = prefetching.getInitialLookAheadValue();
    size_t prefetch_look_ahead = DB::PrefetchingHelper::getInitialLookAheadValue();

    size_t i = 0;
    auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead);
@ -216,10 +216,10 @@ public:
    {
        if constexpr (prefetch)
        {
            if (i == prefetching.iterationsToMeasure())
            if (i == DB::PrefetchingHelper::iterationsToMeasure())
            {
                prefetch_look_ahead = prefetching.calcPrefetchLookAhead();
                prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - prefetching.getInitialLookAheadValue());
                prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - DB::PrefetchingHelper::getInitialLookAheadValue());
            }

            if (prefetch_it != end)
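The hunk above only switches static `PrefetchingHelper` calls to the class-name form; the underlying look-ahead prefetch pattern is unchanged. A minimal sketch of that pattern, with an assumed fixed look-ahead (ClickHouse measures and tunes it at runtime):

```cpp
#include <cstddef>
#include <vector>

// Minimal sketch of look-ahead prefetching during a hash table merge; the
// container, the fixed look-ahead, and the processing step are assumptions
// for illustration, not the ClickHouse implementation.
void merge_with_prefetch(const std::vector<const void *> & cells)
{
    constexpr size_t prefetch_look_ahead = 16;

    for (size_t i = 0; i < cells.size(); ++i)
    {
        // Pull the cache line of a future cell while the current one is processed,
        // so it is resident by the time the loop reaches it.
        if (i + prefetch_look_ahead < cells.size())
            __builtin_prefetch(cells[i + prefetch_look_ahead]);

        // ... emplace cells[i] into the destination table ...
    }
}
```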
@ -2,6 +2,7 @@

#if USE_JEMALLOC

#include <Common/Exception.h>
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>

@ -1,9 +1,10 @@
#pragma once

#include <cstring>
#include <string>
#include <exception>
#include <string>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>


/** Stores a calendar date in broken-down form (year, month, day-in-month).
@ -1,15 +1,20 @@
#pragma once

#include <memory>

#include <base/defines.h>

#include <Poco/Channel.h>
#include <memory>

#include <Poco/Logger.h>
#include <Poco/Message.h>

using LoggerPtr = Poco::LoggerPtr;
namespace Poco
{
class Channel;
class Logger;
using LoggerPtr = std::shared_ptr<Logger>;
}

using LoggerPtr = std::shared_ptr<Poco::Logger>;
using LoggerRawPtr = Poco::Logger *;

/** RAII wrappers around Poco/Logger.h.
@ -1,4 +1,5 @@
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/SipHash.h>
#include <Common/thread_local_rng.h>

@ -1,3 +1,4 @@
#include <Common/CurrentThread.h>
#include <Common/ProfileEventsScope.h>

namespace DB

@ -1,7 +1,8 @@
#pragma once

#include <Common/ProfileEvents.h>
#include <Common/CurrentThread.h>

#include <boost/noncopyable.hpp>

namespace DB
{
@ -1,15 +1,16 @@
#include "QueryProfiler.h"

#include <IO/WriteHelpers.h>
#include <Common/TraceSender.h>
#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/phdr_cache.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/MemoryTracker.h>
#include <Common/StackTrace.h>
#include <Common/thread_local_rng.h>
#include <Common/TraceSender.h>
#include <Common/logger_useful.h>
#include <base/defines.h>
#include <base/phdr_cache.h>
#include <base/errnoToString.h>
#include <Common/thread_local_rng.h>

#include <random>

@ -366,7 +366,7 @@ String demangleAndCollapseNames(std::optional<std::string_view> file, const char
if (file.has_value())
{
    std::string_view file_copy = file.value();
    if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != file_copy.npos)
    if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != std::string_view::npos)
        file_copy.remove_suffix(file_copy.size() - trim_pos);
    if (file_copy.ends_with("functional"))
        return "?";
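The `npos` change above is a clang-tidy `readability-static-accessed-through-instance` fix: `npos` is a static member, so it is spelled via the type rather than via the object. A minimal illustration (standalone, with an assumed helper name):

```cpp
#include <string_view>

// npos is a static member of std::string_view, so the type-qualified form is
// preferred over accessing it through an instance (file_copy.npos).
bool has_parent_dir(std::string_view path)
{
    return path.find_last_of('/') != std::string_view::npos;
}
```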
@ -15,6 +15,7 @@
#include <Common/logger_useful.h>

#include <Common/Exception.h>
#include <Common/MemoryTracker.h>
#include <Common/thread_local_rng.h>

#include <Common/ThreadFuzzer.h>

@ -96,7 +96,7 @@ ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
stack_t altstack_description{};
altstack_description.ss_sp = alt_stack.getData();
altstack_description.ss_flags = 0;
altstack_description.ss_size = alt_stack.getSize();
altstack_description.ss_size = ThreadStack::getSize();

if (0 != sigaltstack(&altstack_description, nullptr))
{
@ -1,7 +1,8 @@
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <Common/thread_local_rng.h>
#include <Common/ZooKeeper/ZooKeeperImpl.h>

#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Compression/CompressionFactory.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
@ -10,17 +11,17 @@
#include <Interpreters/Context.h>
#include <base/getThreadId.h>
#include <base/sleep.h>
#include <Common/CurrentThread.h>
#include <Common/EventNotifier.h>
#include <Common/Exception.h>
#include <Common/ProfileEvents.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <Common/ZooKeeper/ZooKeeperImpl.h>
#include <Common/logger_useful.h>
#include <Common/setThreadName.h>
#include <Compression/CompressedReadBuffer.h>
#include <Compression/CompressedWriteBuffer.h>
#include <Compression/CompressionFactory.h>
#include <Common/thread_local_rng.h>

#include "Coordination/KeeperConstants.h"
#include "config.h"
@ -5,13 +5,11 @@
#include <fmt/format.h>
#include <Poco/Logger.h>
#include <Poco/Message.h>
#include <Common/CurrentThread.h>
#include <Common/ProfileEvents.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/Logger.h>
#include <Common/AtomicLogger.h>

namespace Poco { class Logger; }
#include <Common/CurrentThreadHelpers.h>
#include <Common/Logger.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/ProfileEvents.h>


#define LogToStr(x, y) std::make_unique<LogToStrImpl>(x, y)
@ -22,7 +20,7 @@ using LogSeriesLimiterPtr = std::shared_ptr<LogSeriesLimiter>;
namespace impl
{
    [[maybe_unused]] inline LoggerPtr getLoggerHelper(const LoggerPtr & logger) { return logger; }
    [[maybe_unused]] inline LoggerPtr getLoggerHelper(const AtomicLogger & logger) { return logger.load(); }
    [[maybe_unused]] inline LoggerPtr getLoggerHelper(const DB::AtomicLogger & logger) { return logger.load(); }
    [[maybe_unused]] inline const ::Poco::Logger * getLoggerHelper(const ::Poco::Logger * logger) { return logger; }
    [[maybe_unused]] inline std::unique_ptr<LogToStrImpl> getLoggerHelper(std::unique_ptr<LogToStrImpl> && logger) { return logger; }
    [[maybe_unused]] inline std::unique_ptr<LogFrequencyLimiterIml> getLoggerHelper(std::unique_ptr<LogFrequencyLimiterIml> && logger) { return logger; }
@ -66,8 +64,7 @@ namespace impl
#define LOG_IMPL(logger, priority, PRIORITY, ...) do \
{ \
    auto _logger = ::impl::getLoggerHelper(logger); \
    const bool _is_clients_log = (DB::CurrentThread::getGroup() != nullptr) && \
        (DB::CurrentThread::get().getClientLogsLevel() >= (priority)); \
    const bool _is_clients_log = DB::currentThreadHasGroup() && DB::currentThreadLogsLevel() >= (priority); \
    if (!_is_clients_log && !_logger->is((PRIORITY))) \
        break; \
    \
@ -1,5 +1,7 @@
#pragma once

#include <Common/Logger.h>

#include <list>
#include <memory>
#include <mutex>

@ -1,6 +1,7 @@
#include <IO/WriteBufferFromFile.h>
#include <Common/HostResolvePool.h>
#include <base/sleep.h>
#include <Common/CurrentThread.h>
#include <Common/HostResolvePool.h>

#include <thread>
#include <gtest/gtest.h>
@ -38,7 +38,7 @@ void CachedCompressedReadBuffer::prefetch(Priority priority)
bool CachedCompressedReadBuffer::nextImpl()
{
    /// Check for the presence of a decompressed block in the cache and grab ownership of that block if it exists.
    UInt128 key = cache->hash(path, file_pos);
    UInt128 key = UncompressedCache::hash(path, file_pos);

    owned_cell = cache->getOrSet(key, [&]()
    {
@ -2219,7 +2219,7 @@ uint64_t Changelog::getStartIndex() const
LogEntryPtr Changelog::getLastEntry() const
{
    /// This entry is treated in a special way by NuRaft
    static LogEntryPtr fake_entry = nuraft::cs_new<nuraft::log_entry>(0, nuraft::buffer::alloc(sizeof(uint64_t)));
    static LogEntryPtr fake_entry = nuraft::cs_new<nuraft::log_entry>(0, nuraft::buffer::alloc(0));

    auto entry = entry_storage.getEntry(max_log_id);
    if (entry == nullptr)
@ -592,7 +592,7 @@ String RecalculateCommand::run()

String CleanResourcesCommand::run()
{
    keeper_dispatcher.cleanResources();
    KeeperDispatcher::cleanResources();
    return "ok";
}

@ -16,7 +16,7 @@ ptr<log_entry> makeClone(const ptr<log_entry> & entry)
InMemoryLogStore::InMemoryLogStore()
    : start_idx(1)
{
    nuraft::ptr<nuraft::buffer> buf = nuraft::buffer::alloc(sizeof(uint64_t));
    nuraft::ptr<nuraft::buffer> buf = nuraft::buffer::alloc(0);
    logs[0] = nuraft::cs_new<nuraft::log_entry>(0, buf);
}

@ -1,21 +1,20 @@
#pragma once

#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>
#include <thread>
#include <vector>
#include <base/scope_guard.h>
#include <boost/noncopyable.hpp>
#include <Poco/Notification.h>
#include <Poco/NotificationQueue.h>
#include <Poco/Timestamp.h>
#include <thread>
#include <atomic>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <map>
#include <functional>
#include <boost/noncopyable.hpp>
#include <Common/ZooKeeper/Types.h>
#include <Common/CurrentMetrics.h>
#include <Common/CurrentThread.h>
#include <Common/ThreadPool_fwd.h>
#include <base/scope_guard.h>
#include <Common/ZooKeeper/Types.h>


namespace DB
@ -188,7 +188,7 @@ class IColumn;
    \
    M(Bool, group_by_use_nulls, false, "Treat columns mentioned in ROLLUP, CUBE or GROUPING SETS as Nullable", 0) \
    \
    M(UInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. The lag of the replicas is not controlled.", 0) \
    M(NonZeroUInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. The lag of the replicas is not controlled. Should always be greater than 0.", 0) \
    M(UInt64, parallel_replicas_count, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the number of parallel replicas participating in query processing.", 0) \
    M(UInt64, parallel_replica_offset, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting will be automatically set up by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.", 0) \
    M(String, parallel_replicas_custom_key, "", "Custom key assigning work to replicas when parallel replicas are used.", 0) \
@ -590,6 +590,7 @@ class IColumn;
    M(Bool, optimize_respect_aliases, true, "If it is set to true, it will respect aliases in WHERE/GROUP BY/ORDER BY, which will help with partition pruning/secondary indexes/optimize_aggregation_in_order/optimize_read_in_order/optimize_trivial_count", 0) \
    M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \
    M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \
    M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutations_sync', but controls only execution of lightweight deletes", 0) \
    M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \
    M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that are semantically equal to count() as count().", 0) \
    M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \
@ -86,6 +86,7 @@ namespace SettingsChangesHistory
static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> settings_changes_history =
{
    {"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"},
              {"lightweight_deletes_sync", 2, 2, "The same as 'mutations_sync', but controls only execution of lightweight deletes"},
    }},
    {"24.3", {{"s3_connect_timeout_ms", 1000, 1000, "Introduce new dedicated setting for s3 connection timeout"},
              {"allow_experimental_shared_merge_tree", false, true, "The setting is obsolete"},
@ -575,4 +575,40 @@ void SettingFieldCustom::readBinary(ReadBuffer & in)
    parseFromString(str);
}

SettingFieldNonZeroUInt64::SettingFieldNonZeroUInt64(UInt64 x) : SettingFieldUInt64(x)
{
    checkValueNonZero();
}

SettingFieldNonZeroUInt64::SettingFieldNonZeroUInt64(const DB::Field & f) : SettingFieldUInt64(f)
{
    checkValueNonZero();
}

SettingFieldNonZeroUInt64 & SettingFieldNonZeroUInt64::operator=(UInt64 x)
{
    SettingFieldUInt64::operator=(x);
    checkValueNonZero();
    return *this;
}

SettingFieldNonZeroUInt64 & SettingFieldNonZeroUInt64::operator=(const DB::Field & f)
{
    SettingFieldUInt64::operator=(f);
    checkValueNonZero();
    return *this;
}

void SettingFieldNonZeroUInt64::parseFromString(const String & str)
{
    SettingFieldUInt64::parseFromString(str);
    checkValueNonZero();
}

void SettingFieldNonZeroUInt64::checkValueNonZero() const
{
    if (value == 0)
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "A setting's value has to be greater than 0");
}

}
@ -516,4 +516,19 @@ struct SettingFieldCustom
    void readBinary(ReadBuffer & in);
};

struct SettingFieldNonZeroUInt64 : public SettingFieldUInt64
{
public:
    explicit SettingFieldNonZeroUInt64(UInt64 x = 1);
    explicit SettingFieldNonZeroUInt64(const Field & f);

    SettingFieldNonZeroUInt64 & operator=(UInt64 x);
    SettingFieldNonZeroUInt64 & operator=(const Field & f);

    void parseFromString(const String & str);

private:
    void checkValueNonZero() const;
};

}
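A minimal sketch of the guarantee `SettingFieldNonZeroUInt64` provides, with simplified standard-library types standing in for the ClickHouse ones: every mutation path re-validates, so a setting such as `max_parallel_replicas` can never hold zero.

```cpp
#include <cstdint>
#include <stdexcept>

// Simplified stand-in for SettingFieldNonZeroUInt64 (illustrative only):
// construction and assignment both reject zero.
struct NonZeroUInt64
{
    uint64_t value = 1;

    explicit NonZeroUInt64(uint64_t x = 1) : value(x) { check(); }

    NonZeroUInt64 & operator=(uint64_t x)
    {
        value = x;
        check();
        return *this;
    }

private:
    void check() const
    {
        if (value == 0)
            throw std::invalid_argument("A setting's value has to be greater than 0");
    }
};

int main()
{
    NonZeroUInt64 max_parallel_replicas{3}; // fine
    try
    {
        max_parallel_replicas = 0; // rejected at assignment time
    }
    catch (const std::invalid_argument &) { /* expected */ }
}
```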
@ -1,9 +1,11 @@
#pragma clang diagnostic ignored "-Wreserved-identifier"

#include <base/defines.h>
#include <base/errnoToString.h>
#include <Common/CurrentThread.h>
#include <Common/MemoryTracker.h>
#include <Daemon/BaseDaemon.h>
#include <Daemon/SentryWriter.h>
#include <base/errnoToString.h>
#include <base/defines.h>

#include <sys/stat.h>
#include <sys/types.h>
@ -1,6 +1,8 @@
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/Serializations/SerializationDate32.h>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>

namespace DB
{
@ -14,6 +16,11 @@ SerializationPtr DataTypeDate32::doGetDefaultSerialization() const
    return std::make_shared<SerializationDate32>();
}

Field DataTypeDate32::getDefault() const
{
    return -static_cast<Int64>(DateLUT::instance().getDayNumOffsetEpoch());
}

void registerDataTypeDate32(DataTypeFactory & factory)
{
    factory.registerSimpleDataType(

@ -1,7 +1,6 @@
#pragma once

#include <Core/Field.h>
#include <Common/DateLUT.h>
#include <DataTypes/DataTypeNumberBase.h>

namespace DB
@ -15,10 +14,7 @@ public:
    TypeIndex getColumnType() const override { return TypeIndex::Int32; }
    const char * getFamilyName() const override { return family_name; }

    Field getDefault() const override
    {
        return -static_cast<Int64>(DateLUT::instance().getDayNumOffsetEpoch());
    }
    Field getDefault() const override;

    bool canBeUsedAsVersion() const override { return true; }
    bool canBeInsideNullable() const override { return true; }
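As a sanity check on `getDefault` above: `getDayNumOffsetEpoch()` is the number of days from 1900-01-01 (the minimum Date32 value) to the Unix epoch, so negating it yields the smallest representable day. A standalone computation, assuming that lower bound:

```cpp
#include <cstdint>
#include <iostream>

// Standalone check (not ClickHouse code): count the days from 1900-01-01 to
// 1970-01-01 under Gregorian leap-year rules.
int main()
{
    int64_t days = 0;
    for (int year = 1900; year < 1970; ++year)
    {
        bool leap = (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
        days += leap ? 366 : 365;
    }
    // Prints 25567; the default Date32 field is therefore -25567, i.e. 1900-01-01.
    std::cout << days << '\n';
}
```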
@ -68,7 +68,7 @@ std::pair<String, StoragePtr> createTableFromAST(
    ast_create_query.setDatabase(database_name);

    if (ast_create_query.select && ast_create_query.isView())
        ApplyWithSubqueryVisitor().visit(*ast_create_query.select);
        ApplyWithSubqueryVisitor::visit(*ast_create_query.select);

    if (ast_create_query.as_table_function)
    {

@ -171,7 +171,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
    auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false);
    if (ast)
    {
        FunctionNameNormalizer().visit(ast.get());
        FunctionNameNormalizer::visit(ast.get());
        auto * create_query = ast->as<ASTCreateQuery>();
        /// NOTE No concurrent writes are possible during database loading
        create_query->setDatabase(TSA_SUPPRESS_WARNING_FOR_READ(database_name));
@ -5,6 +5,7 @@
#include <Storages/PostgreSQL/StorageMaterializedPostgreSQL.h>
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>

#include <Common/CurrentThread.h>
#include <Common/logger_useful.h>
#include <Common/Macros.h>
#include <Common/PoolId.h>

@ -448,7 +448,7 @@ std::vector<StorageID> TablesDependencyGraph::getTables() const
void TablesDependencyGraph::mergeWith(const TablesDependencyGraph & other)
{
    for (const auto & other_node : other.nodes)
        addDependencies(other_node->storage_id, other.getDependencies(*other_node));
        addDependencies(other_node->storage_id, TablesDependencyGraph::getDependencies(*other_node));
}

@ -413,7 +413,7 @@ void FlatDictionary::blockToAttributes(const Block & block)
    const auto keys_column = block.safeGetByPosition(0).column;

    DictionaryKeysArenaHolder<DictionaryKeyType::Simple> arena_holder;
    DictionaryKeysExtractor<DictionaryKeyType::Simple> keys_extractor({ keys_column }, arena_holder.getComplexKeyArena());
    DictionaryKeysExtractor<DictionaryKeyType::Simple> keys_extractor({ keys_column }, arena_holder.getComplexKeyArena()); /// NOLINT(readability-static-accessed-through-instance)
    size_t keys_size = keys_extractor.getKeysSize();

    static constexpr size_t key_offset = 1;

@ -214,7 +214,7 @@ public:
    static constexpr Coord kEps = 1e-4f;

private:
    std::unique_ptr<ICell<ReturnCell>> root = nullptr;
    std::unique_ptr<ICell<ReturnCell>> root;
    Coord min_x = 0, min_y = 0;
    Coord max_x = 0, max_y = 0;
    const size_t k_min_intersections;
@ -581,7 +581,7 @@ try
    auto disk_ptr = std::static_pointer_cast<DiskLocal>(shared_from_this());
    auto tmp_file = std::make_unique<TemporaryFileOnDisk>(disk_ptr);
    auto buf = std::make_unique<WriteBufferFromTemporaryFile>(std::move(tmp_file));
    buf->write(data.data, data.PAGE_SIZE_IN_BYTES);
    buf->write(data.data, DiskWriteCheckData::PAGE_SIZE_IN_BYTES);
    buf->finalize();
    buf->sync();
}
@ -65,7 +65,20 @@ WriteBufferFromAzureBlobStorage::WriteBufferFromAzureBlobStorage(

WriteBufferFromAzureBlobStorage::~WriteBufferFromAzureBlobStorage()
{
    finalize();
    LOG_TRACE(limitedLog, "Close WriteBufferFromAzureBlobStorage. {}.", blob_path);

    /// The destructor can be called with finalized == false in case of exceptions
    if (!finalized)
    {
        LOG_INFO(
            log,
            "WriteBufferFromAzureBlobStorage is not finalized in destructor. "
            "The file might not be written to AzureBlobStorage. "
            "{}.",
            blob_path);
    }

    task_tracker->safeWaitAll();
}

void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func, size_t num_tries, size_t cost)
@ -102,9 +115,13 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
    }
}

void WriteBufferFromAzureBlobStorage::finalizeImpl()
void WriteBufferFromAzureBlobStorage::preFinalize()
{
    auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
    if (is_prefinalized)
        return;

    // This function should not be run again
    is_prefinalized = true;

    /// If there is only one block and its size is less than or equal to max_single_part_upload_size,
    /// we use a single part upload instead of a multi part upload
@ -113,6 +130,7 @@ void WriteBufferFromAzureBlobStorage::finalizeImpl()
    size_t data_size = size_t(position() - memory.data());
    if (data_size <= max_single_part_upload_size)
    {
        auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
        Azure::Core::IO::MemoryBodyStream memory_stream(reinterpret_cast<const uint8_t *>(memory.data()), data_size);
        execWithRetry([&](){ block_blob_client.Upload(memory_stream); }, max_unexpected_write_error_retries, data_size);
        LOG_TRACE(log, "Committed single block for blob `{}`", blob_path);
@ -120,14 +138,23 @@ void WriteBufferFromAzureBlobStorage::finalizeImpl()
    }
    }

    writePart();
}

    execWithRetry([this](){ next(); }, max_unexpected_write_error_retries);
void WriteBufferFromAzureBlobStorage::finalizeImpl()
{
    LOG_TRACE(log, "finalizeImpl WriteBufferFromAzureBlobStorage {}", blob_path);

    if (!is_prefinalized)
        preFinalize();

    if (!block_ids.empty())
    {
        task_tracker->waitAll();

        auto block_blob_client = blob_container_client->GetBlockBlobClient(blob_path);
        execWithRetry([&](){ block_blob_client.CommitBlockList(block_ids); }, max_unexpected_write_error_retries);

        LOG_TRACE(log, "Committed {} blocks for blob `{}`", block_ids.size(), blob_path);
    }
}

void WriteBufferFromAzureBlobStorage::nextImpl()
@ -41,7 +41,7 @@ public:
    ~WriteBufferFromAzureBlobStorage() override;

    void nextImpl() override;

    void preFinalize() override;
    std::string getFileName() const override { return blob_path; }
    void sync() override { next(); }

@ -65,6 +65,9 @@ private:
    const std::string blob_path;
    const WriteSettings write_settings;

    /// Track that preFinalize() is called only once
    bool is_prefinalized = false;

    AzureClientPtr blob_container_client;
    std::vector<std::string> block_ids;

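The split above follows the two-phase finalization pattern used by ClickHouse write buffers: `preFinalize()` flushes buffered data and may start uploads, while `finalizeImpl()` waits for them and commits. A generic sketch of the idea, with assumed member names rather than the real class:

```cpp
// Generic sketch of two-phase finalization (illustrative, not the real class):
// both phases are idempotent, and finalize() alone still produces a complete
// object by running the missing preFinalize() step first.
class TwoPhaseWriteBuffer
{
public:
    void preFinalize()
    {
        if (is_prefinalized)
            return;           // must be safe to call more than once
        is_prefinalized = true;
        // ... flush the last buffered block / schedule background uploads ...
    }

    void finalize()
    {
        if (finalized)
            return;
        if (!is_prefinalized)
            preFinalize();    // finalize() without preFinalize() still works
        // ... wait for uploads, then commit the block list ...
        finalized = true;
    }

private:
    bool is_prefinalized = false;
    bool finalized = false;
};
```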
@ -13,6 +13,7 @@
namespace DB
{

class Block;
struct JSONInferenceInfo;

namespace JSONUtils

@ -1527,7 +1527,7 @@ struct ToMillisecondImpl

    static UInt16 execute(const DateTime64 & datetime64, Int64 scale_multiplier, const DateLUTImpl & time_zone)
    {
        return time_zone.toMillisecond<DateTime64>(datetime64, scale_multiplier);
        return time_zone.toMillisecond(datetime64, scale_multiplier);
    }

    static UInt16 execute(UInt32, const DateLUTImpl &)
@ -1,66 +1,67 @@
#include <type_traits>

#include <IO/WriteBufferFromVector.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/Operators.h>
#include <IO/parseDateTimeBestEffort.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeFixedString.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnLowCardinality.h>
#include <Columns/ColumnMap.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnObject.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnStringHelpers.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnVariant.h>
#include <Columns/ColumnsCommon.h>
#include <Core/AccurateComparison.h>
#include <Core/Types.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDate32.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeFixedString.h>
#include <DataTypes/DataTypeIPv4andIPv6.h>
#include <DataTypes/DataTypeInterval.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeNested.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/ObjectUtils.h>
#include <DataTypes/Serializations/SerializationDecimal.h>
#include <Formats/FormatSettings.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnMap.h>
#include <Columns/ColumnObject.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnVariant.h>
#include <Columns/ColumnStringHelpers.h>
#include <Common/assert_cast.h>
#include <Common/Concepts.h>
#include <Common/quoteString.h>
#include <Common/Exception.h>
#include <Core/AccurateComparison.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunctionAdaptors.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/DateTimeTransforms.h>
#include <Functions/toFixedString.h>
#include <Functions/TransformDateTime64.h>
#include <Functions/FunctionsCodingIP.h>
#include <Functions/CastOverloadResolver.h>
#include <Functions/DateTimeTransforms.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionsCodingIP.h>
#include <Functions/IFunctionAdaptors.h>
#include <Functions/TransformDateTime64.h>
#include <Functions/castTypeToEither.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Columns/ColumnLowCardinality.h>
#include <Functions/toFixedString.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromMemory.h>
#include <IO/WriteBufferFromVector.h>
#include <IO/parseDateTimeBestEffort.h>
#include <Interpreters/Context.h>
#include <Common/Concepts.h>
#include <Common/CurrentThread.h>
#include <Common/Exception.h>
#include <Common/HashTable/HashMap.h>
#include <DataTypes/DataTypeIPv4andIPv6.h>
#include <Common/IPv6ToBinary.h>
#include <Core/Types.h>
#include <Common/assert_cast.h>
#include <Common/quoteString.h>


namespace DB
@ -1139,7 +1139,7 @@ private:
        getName());

    auto dictionary = helper.getDictionary(arguments[0].column);
    const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
    const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

    return std::make_shared<DataTypeArray>(removeNullable(hierarchical_attribute.type));
}
@ -1150,7 +1150,7 @@ private:
        return result_type->createColumn();

    auto dictionary = helper.getDictionary(arguments[0].column);
    const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
    const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

    auto key_column = ColumnWithTypeAndName{arguments[1].column, arguments[1].type, arguments[1].name};
    auto key_column_casted = castColumnAccurate(key_column, removeNullable(hierarchical_attribute.type));
@ -1205,7 +1205,7 @@ private:
        return result_type->createColumn();

    auto dictionary = helper.getDictionary(arguments[0].column);
    const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
    const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

    auto key_column = ColumnWithTypeAndName{arguments[1].column->convertToFullColumnIfConst(), arguments[1].type, arguments[2].name};
    auto in_key_column = ColumnWithTypeAndName{arguments[2].column->convertToFullColumnIfConst(), arguments[2].type, arguments[2].name};
@ -21,6 +21,7 @@
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnVariant.h>

#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
@ -35,6 +36,8 @@
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/Serializations/SerializationVariant.h>
#include <DataTypes/Serializations/SerializationDecimal.h>

#include <Functions/IFunction.h>
@ -257,7 +260,7 @@ private:
        }
        case MoveType::Key:
        {
            key = (*arguments[j + 1].column).getDataAt(row).toView();
            key = arguments[j + 1].column->getDataAt(row).toView();
            if (!moveToElementByKey<JSONParser>(res_element, key))
                return false;
            break;
@ -334,6 +337,26 @@ private:

};

template <typename T>
class JSONExtractImpl;

template <typename T>
class JSONExtractKeysAndValuesImpl;

/**
 * Functions JSONExtract and JSONExtractKeysAndValues force the return type - it is specified in the last argument.
 * For example - `SELECT JSONExtract(materialize('{"a": 131231, "b": 1234}'), 'b', 'LowCardinality(FixedString(4))')`
 * But by default ClickHouse decides on its own whether the return type will be LowCardinality based on the types of
 * input arguments.
 * And for these specific functions we cannot rely on this mechanism, so these functions have their own implementation -
 * just convert all of the LowCardinality input columns to full ones, execute and wrap the resulting column in LowCardinality
 * if needed.
 */
template <template<typename> typename Impl>
constexpr bool functionForcesTheReturnType()
{
    return std::is_same_v<Impl<void>, JSONExtractImpl<void>> || std::is_same_v<Impl<void>, JSONExtractKeysAndValuesImpl<void>>;
}

template <typename Name, template<typename> typename Impl>
class ExecutableFunctionJSON : public IExecutableFunction
@ -348,18 +371,51 @@ public:
    String getName() const override { return Name::name; }
    bool useDefaultImplementationForNulls() const override { return false; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const override
    {
        return !functionForcesTheReturnType<Impl>();
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
        if (null_presence.has_null_constant)
            return result_type->createColumnConstWithDefaultValue(input_rows_count);

        if constexpr (functionForcesTheReturnType<Impl>())
        {
            ColumnsWithTypeAndName columns_without_low_cardinality = arguments;

            for (auto & column : columns_without_low_cardinality)
            {
                column.column = recursiveRemoveLowCardinality(column.column);
                column.type = recursiveRemoveLowCardinality(column.type);
            }

            ColumnsWithTypeAndName temporary_columns = null_presence.has_nullable ? createBlockWithNestedColumns(columns_without_low_cardinality) : columns_without_low_cardinality;
            ColumnPtr temporary_result = chooseAndRunJSONParser(temporary_columns, json_return_type, input_rows_count);

            if (null_presence.has_nullable)
                temporary_result = wrapInNullable(temporary_result, columns_without_low_cardinality, result_type, input_rows_count);

            if (result_type->lowCardinality())
                temporary_result = recursiveLowCardinalityTypeConversion(temporary_result, json_return_type, result_type);

            return temporary_result;
        }
        else
        {
            ColumnsWithTypeAndName temporary_columns = null_presence.has_nullable ? createBlockWithNestedColumns(arguments) : arguments;
            ColumnPtr temporary_result = chooseAndRunJSONParser(temporary_columns, json_return_type, input_rows_count);

            if (null_presence.has_nullable)
                return wrapInNullable(temporary_result, arguments, result_type, input_rows_count);
            temporary_result = wrapInNullable(temporary_result, arguments, result_type, input_rows_count);

            if (result_type->lowCardinality())
                temporary_result = recursiveLowCardinalityTypeConversion(temporary_result, json_return_type, result_type);

            return temporary_result;
        }
    }

private:

@ -429,7 +485,6 @@ private:
    DataTypePtr json_return_type;
};

/// We use IFunctionOverloadResolver instead of IFunction to handle non-default NULL processing.
/// Both NULL and JSON NULL should generate a NULL value. If any argument is NULL, return NULL.
template <typename Name, template<typename> typename Impl>
@ -450,6 +505,10 @@ public:
    bool isVariadic() const override { return true; }
    size_t getNumberOfArguments() const override { return 0; }
    bool useDefaultImplementationForNulls() const override { return false; }
    bool useDefaultImplementationForLowCardinalityColumns() const override
    {
        return !functionForcesTheReturnType<Impl>();
    }

    FunctionBasePtr build(const ColumnsWithTypeAndName & arguments) const override
    {
@ -481,7 +540,6 @@ public:
    }
};


struct NameJSONHas { static constexpr auto name{"JSONHas"}; };
struct NameIsValidJSON { static constexpr auto name{"isValidJSON"}; };
struct NameJSONLength { static constexpr auto name{"JSONLength"}; };
@ -1238,6 +1296,35 @@ struct JSONExtractTree
    std::unique_ptr<Node> value;
};

class VariantNode : public Node
{
public:
    VariantNode(std::vector<std::unique_ptr<Node>> variant_nodes_, std::vector<size_t> order_) : variant_nodes(std::move(variant_nodes_)), order(std::move(order_)) { }

    bool insertResultToColumn(IColumn & dest, const Element & element) override
    {
        auto & column_variant = assert_cast<ColumnVariant &>(dest);
        for (size_t i : order)
        {
            auto & variant = column_variant.getVariantByGlobalDiscriminator(i);
            if (variant_nodes[i]->insertResultToColumn(variant, element))
            {
                column_variant.getLocalDiscriminators().push_back(column_variant.localDiscriminatorByGlobal(i));
                column_variant.getOffsets().push_back(variant.size() - 1);
                return true;
            }
        }

        return false;
    }

private:
    std::vector<std::unique_ptr<Node>> variant_nodes;
    /// Order in which we should try the variant nodes.
    /// For example, String should always be the last one.
    std::vector<size_t> order;
};

static std::unique_ptr<Node> build(const char * function_name, const DataTypePtr & type)
{
    switch (type->getTypeId())
@ -1314,6 +1401,16 @@ struct JSONExtractTree
        const auto & value_type = map_type.getValueType();
        return std::make_unique<MapNode>(build(function_name, key_type), build(function_name, value_type));
    }
    case TypeIndex::Variant:
    {
        const auto & variant_type = static_cast<const DataTypeVariant &>(*type);
        const auto & variants = variant_type.getVariants();
        std::vector<std::unique_ptr<Node>> variant_nodes;
        variant_nodes.reserve(variants.size());
        for (const auto & variant : variants)
            variant_nodes.push_back(build(function_name, variant));
        return std::make_unique<VariantNode>(std::move(variant_nodes), SerializationVariant::getVariantsDeserializeTextOrder(variants));
    }
    default:
        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
            "Function {} doesn't support the return type schema: {}",
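The `VariantNode` above tries each variant's parser in a precomputed order and commits to the first one that accepts the element. A toy illustration of that strategy (plain standard C++, not the ClickHouse classes), where a catch-all variant such as String must come last:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy sketch of trying variants in priority order; parsers are simplistic
// stand-ins for the per-variant insertResultToColumn calls.
int main()
{
    using TryParse = std::function<bool(const std::string &)>;

    std::vector<std::pair<std::string, TryParse>> variants = {
        {"UInt64", [](const std::string & s) { return !s.empty() && s.find_first_not_of("0123456789") == std::string::npos; }},
        {"Bool",   [](const std::string & s) { return s == "true" || s == "false"; }},
        {"String", [](const std::string &) { return true; }}, // accepts anything, so it must come last
    };

    for (const std::string & element : {"42", "true", "hello"})
        for (const auto & [name, try_parse] : variants)
            if (try_parse(element))
            {
                std::cout << element << " -> " << name << '\n';
                break; // commit to the first variant that accepts the element
            }
}
```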
@ -1,8 +1,9 @@
#pragma once

#include <Common/DateLUT.h>
#include <DataTypes/DataTypeInterval.h>
#include <Functions/IFunction.h>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>


namespace DB

@ -144,7 +144,7 @@ struct NumericArraySource : public ArraySourceImpl<NumericArraySource<T>>
#pragma clang diagnostic ignored "-Wsuggest-override"
#pragma clang diagnostic ignored "-Wsuggest-destructor-override"

/// NOLINTBEGIN(hicpp-use-override)
/// NOLINTBEGIN(hicpp-use-override, modernize-use-override)

template <typename Base>
struct ConstSource : public Base
@ -233,7 +233,7 @@ struct ConstSource : public Base
    }
};

/// NOLINTEND(hicpp-use-override)
/// NOLINTEND(hicpp-use-override, modernize-use-override)

#pragma clang diagnostic pop

Some files were not shown because too many files have changed in this diff.