Merge branch 'ClickHouse:master' into complex_map_key
Commit 7df85e6176 (mirror of https://github.com/ClickHouse/ClickHouse.git)
@@ -96,7 +96,6 @@ Checks: [
     '-modernize-use-default-member-init',
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
     '-modernize-use-override',
     '-modernize-use-trailing-return-type',
-    '-performance-inefficient-string-concatenation',
@@ -123,7 +123,6 @@
* Something was wrong with Apache Hive, which is experimental and not supported. [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)).
* An improvement for experimental parallel replicas: force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)).
* Fix usage of plain metadata type with new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't allow to set max_parallel_replicas to 0 as it doesn't make sense [#60430](https://github.com/ClickHouse/ClickHouse/pull/60430) ([Kruglov Pavel](https://github.com/Avogar)).
* Try to fix logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)).
* Avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).
@@ -13,8 +13,6 @@
 #include <tuple>
 #include <limits>
-
-#include <boost/math/special_functions/fpclassify.hpp>

 // NOLINTBEGIN(*)

 /// Use same extended double for all platforms
@@ -22,6 +20,7 @@
 #define CONSTEXPR_FROM_DOUBLE constexpr
 using FromDoubleIntermediateType = long double;
 #else
+#include <boost/math/special_functions/fpclassify.hpp>
 #include <boost/multiprecision/cpp_bin_float.hpp>
 /// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
 #define CONSTEXPR_FROM_DOUBLE
@@ -309,6 +308,13 @@ struct integer<Bits, Signed>::_impl
        constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
        static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
        /// Implementation specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
+#if (LDBL_MANT_DIG == 64)
+       if (!std::isfinite(t))
+       {
+           self = 0;
+           return;
+       }
+#else
        if constexpr (std::is_same_v<T, double>)
        {
            if (!std::isfinite(t))
@@ -325,6 +331,7 @@ struct integer<Bits, Signed>::_impl
                return;
            }
        }
+#endif

        const T alpha = t / static_cast<T>(max_int);
@@ -314,13 +314,13 @@ static int read_unicode(json_stream *json)

        if (l < 0xdc00 || l > 0xdfff) {
            json_error(json, "invalid surrogate pair continuation \\u%04lx out "
-                      "of range (dc00-dfff)", l);
+                      "of range (dc00-dfff)", (unsigned long)l);
            return -1;
        }

        cp = ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000);
    } else if (cp >= 0xdc00 && cp <= 0xdfff) {
-       json_error(json, "dangling surrogate \\u%04lx", cp);
+       json_error(json, "dangling surrogate \\u%04lx", (unsigned long)cp);
        return -1;
    }
contrib/NuRaft (vendored)
@@ -1 +1 @@
-Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1
+Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
@@ -32,6 +32,7 @@ set(SRCS
    "${LIBRARY_DIR}/src/handle_custom_notification.cxx"
    "${LIBRARY_DIR}/src/handle_vote.cxx"
    "${LIBRARY_DIR}/src/launcher.cxx"
+   "${LIBRARY_DIR}/src/log_entry.cxx"
    "${LIBRARY_DIR}/src/srv_config.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
    "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
@@ -50,6 +51,12 @@ else()
    target_compile_definitions(_nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1)
endif()

+target_link_libraries (_nuraft PRIVATE clickhouse_common_io)
+# We must have it PUBLIC here because some headers which depend on it directly
+# included in clickhouse
+target_compile_definitions(_nuraft PUBLIC USE_CLICKHOUSE_THREADS=1)
+MESSAGE(STATUS "Will use clickhouse threads for NuRaft")
+
target_include_directories (_nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft")
# for some reason include "asio.h" directly without "boost/" prefix.
target_include_directories (_nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
@@ -16,6 +16,8 @@ ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

 dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
+dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
+dpkg -i package_folder/clickhouse-library-bridge_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
 dpkg -i package_folder/clickhouse-client_*.deb
@@ -79,7 +79,7 @@ The supported formats are:
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
-| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
+| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✗ |
 | [Native](#native) | ✔ | ✔ |
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
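The two marks are the Input/Output columns: after this change `RowBinaryWithDefaults` remains readable but is no longer writable. A hedged sketch of what that means in practice (the table `t` is hypothetical):

```sql
-- Input (✔): data in this format can still be parsed, e.g. via INSERT ... FORMAT:
--   INSERT INTO t FORMAT RowBinaryWithDefaults
-- Output (✗): asking the server to produce this format is expected to fail after this change:
SELECT * FROM t FORMAT RowBinaryWithDefaults;
```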
@@ -1487,7 +1487,7 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000
- [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
- [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
- [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
-- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
+- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`.
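To make the changed default concrete, a minimal sketch of toggling the row-number prefix (output abbreviated; `system.numbers` ships with every server):

```sql
-- New default: rows are numbered in Pretty formats
SELECT number FROM system.numbers LIMIT 3 FORMAT PrettyCompact;

-- Restore the old behavior for a single query
SELECT number FROM system.numbers LIMIT 3
SETTINGS output_format_pretty_row_numbers = 0
FORMAT PrettyCompact;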

## RowBinary {#rowbinary}
@@ -2465,7 +2465,7 @@ Result:

## Npy {#data-format-npy}

This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats the top-level dimension as an array of rows with a single column. Supported Npy data types and their corresponding ClickHouse types:

| Npy type | ClickHouse type |
|:--------:|:---------------:|
|    b1    |      UInt8      |
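A usage sketch for the format described above (the file name is hypothetical; the `file` table function reads Npy directly):

```sql
-- Read a NumPy array from disk; each top-level element becomes a row of a single column
SELECT * FROM file('example_array.npy', Npy);
```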
@@ -436,7 +436,7 @@ Default: 0

Restriction on dropping partitions.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
Default value: 50 GB.
The value 0 means that you can drop partitions without any restrictions.
@@ -518,7 +518,7 @@ Restriction on deleting tables.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.
The value 0 means that you can delete all tables without any restrictions.
@@ -1570,7 +1570,7 @@ Restriction on deleting tables.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.
@@ -1588,7 +1588,7 @@ Restriction on dropping partitions.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.
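Since the hunks above all describe server-level limits, a quick sketch for inspecting the effective values (assuming a release recent enough to have the `system.server_settings` table):

```sql
SELECT name, value, description
FROM system.server_settings
WHERE name IN ('max_table_size_to_drop', 'max_partition_size_to_drop');
```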
@@ -1642,7 +1642,7 @@ Possible values:

- 0 — Output without row numbers.
- 1 — Output with row numbers.

-Default value: `0`.
+Default value: `1`.

**Example**
@@ -36,7 +36,7 @@ E.g. configuration option
    <s3>
        <type>s3</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3>
```
@@ -47,7 +47,7 @@ is equal to configuration (from `24.1`):
        <object_storage_type>s3</object_storage_type>
        <metadata_type>local</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3>
```
@@ -56,7 +56,7 @@ Configuration
    <s3_plain>
        <type>s3_plain</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```
@@ -67,7 +67,7 @@ is equal to
        <object_storage_type>s3</object_storage_type>
        <metadata_type>plain</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```
@@ -79,7 +79,7 @@ Example of full storage configuration will look like:
    <s3>
        <type>s3</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3>
</disks>
<policies>
@@ -105,7 +105,7 @@ Starting with 24.1 clickhouse version, it can also look like:
        <object_storage_type>s3</object_storage_type>
        <metadata_type>local</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3>
</disks>
<policies>
@@ -324,7 +324,7 @@ Configuration:
    <s3_plain>
        <type>s3_plain</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```
@@ -337,7 +337,7 @@ Configuration:
        <object_storage_type>azure</object_storage_type>
        <metadata_type>plain</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-       <use_invironment_credentials>1</use_invironment_credentials>
+       <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```
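As a usage note for the disk declarations above: a MergeTree table opts into such a disk through a storage policy. A minimal sketch (the policy name `s3_main` is illustrative and must exist in the `<policies>` section):

```sql
CREATE TABLE s3_backed_table
(
    id UInt64,
    value String
)
ENGINE = MergeTree
ORDER BY id
SETTINGS storage_policy = 's3_main';
```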
@@ -47,7 +47,7 @@ An example:
    <engine>ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024</engine>
    -->
    <flush_interval_milliseconds>7500</flush_interval_milliseconds>
-   <max_size_rows>1048576</max_size>
+   <max_size_rows>1048576</max_size_rows>
    <reserved_size_rows>8192</reserved_size_rows>
    <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
    <flush_on_crash>false</flush_on_crash>
@@ -483,7 +483,7 @@ Where:

- `r1` - the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
- `r2` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
-- `r3` - the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
+- `r3` - the number of unique visitors who visited the site during a specific time period on 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
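A compact sketch of the query shape that produces `r1`/`r2`/`r3` (the `visits` table with `uid` and `date` columns is hypothetical):

```sql
SELECT
    sum(r[1]) AS r1,  -- visitors satisfying cond1
    sum(r[2]) AS r2,  -- visitors satisfying cond1 and cond2
    sum(r[3]) AS r3   -- visitors satisfying cond1 and cond3
FROM
(
    SELECT
        uid,
        retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
    FROM visits
    WHERE date BETWEEN '2020-01-01' AND '2020-01-03'
    GROUP BY uid
);
```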

## uniqUpTo(N)(x)
@@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t

The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn’t explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter.

-ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.
+ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.

-When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting.
+When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting.

## Examples

@@ -147,8 +147,8 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
- [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
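A hedged sketch of the input setting in action (the table is hypothetical; `best_effort` relaxes the accepted input formats):

```sql
CREATE TABLE dt_demo (ts DateTime) ENGINE = Memory;

SET date_time_input_format = 'best_effort';

-- A non-canonical textual form now parses on insert:
INSERT INTO dt_demo VALUES ('31/01/2022 23:00:00');

SELECT ts FROM dt_demo;
```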
@@ -190,22 +190,67 @@ SELECT toTypeName(variantType(v)) FROM test LIMIT 1;
└─────────────────────────────────────────────────────────────────────┘
```

-## Conversion between Variant column and other columns
+## Conversion between a Variant column and other columns

-There are 3 possible conversions that can be performed with Variant column.
+There are 4 possible conversions that can be performed with a column of type `Variant`.

-### Converting an ordinary column to a Variant column
+### Converting a String column to a Variant column

-It is possible to convert ordinary column with type `T` to a `Variant` column containing this type:
+Conversion from `String` to `Variant` is performed by parsing a value of `Variant` type from the string value:

```sql
-SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
+SELECT '42'::Variant(String, UInt64) as variant, variantType(variant) as variant_type
```

```text
-┌─type_name──────────────────────────────┬─variant───────┐
-│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
-└────────────────────────────────────────┴───────────────┘
+┌─variant─┬─variant_type─┐
+│ 42      │ UInt64       │
+└─────────┴──────────────┘
```

```sql
SELECT '[1, 2, 3]'::Variant(String, Array(UInt64)) as variant, variantType(variant) as variant_type
```

```text
┌─variant─┬─variant_type──┐
│ [1,2,3] │ Array(UInt64) │
└─────────┴───────────────┘
```

```sql
SELECT CAST(map('key1', '42', 'key2', 'true', 'key3', '2020-01-01'), 'Map(String, Variant(UInt64, Bool, Date))') as map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) as map_of_variant_types
```

```text
┌─map_of_variants─────────────────────────────┬─map_of_variant_types──────────────────────────┐
│ {'key1':42,'key2':true,'key3':'2020-01-01'} │ {'key1':'UInt64','key2':'Bool','key3':'Date'} │
└─────────────────────────────────────────────┴───────────────────────────────────────────────┘
```

### Converting an ordinary column to a Variant column

It is possible to convert an ordinary column with type `T` to a `Variant` column containing this type:

```sql
SELECT toTypeName(variant) as type_name, [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) as variant, variantType(variant) as variant_name
```

```text
┌─type_name──────────────────────────────┬─variant─┬─variant_name──┐
│ Variant(Array(UInt64), String, UInt64) │ [1,2,3] │ Array(UInt64) │
└────────────────────────────────────────┴─────────┴───────────────┘
```

Note: converting from the `String` type is always performed through parsing; if you need to convert a `String` column to the `String` variant of a `Variant` without parsing, you can do the following:
```sql
SELECT '[1, 2, 3]'::Variant(String)::Variant(String, Array(UInt64), UInt64) as variant, variantType(variant) as variant_type
```

```text
┌─variant───┬─variant_type─┐
│ [1, 2, 3] │ String       │
└───────────┴──────────────┘
```

### Converting a Variant column to an ordinary column
@@ -395,3 +440,37 @@ SELECT v, variantType(v) FROM test ORDER by v;
│ 100 │ UInt32 │
└─────┴────────────────┘
```

## JSONExtract functions with Variant

All `JSONExtract*` functions support the `Variant` type:

```sql
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Variant(UInt32, String, Array(UInt32))') AS variant, variantType(variant) AS variant_type;
```

```text
┌─variant─┬─variant_type──┐
│ [1,2,3] │ Array(UInt32) │
└─────────┴───────────────┘
```

```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) AS map_of_variant_types
```

```text
┌─map_of_variants──────────────────┬─map_of_variant_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
```

```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS variants, arrayMap(x -> (x.1, variantType(x.2)), variants) AS variant_types
```

```text
┌─variants───────────────────────────────┬─variant_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
```
@@ -1670,7 +1670,7 @@ Like [fromDaysSinceYearZero](#fromDaysSinceYearZero) but returns a [Date32](../.

## age

-Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 microsecond.
+Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 nanosecond.
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.

For an alternative to `age`, see function `date_diff`.
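A sketch confirming the worked example from the paragraph above:

```sql
SELECT
    age('day',   toDate('2021-12-29'), toDate('2022-01-01')) AS days,    -- 3
    age('month', toDate('2021-12-29'), toDate('2022-01-01')) AS months,  -- 0
    age('year',  toDate('2021-12-29'), toDate('2022-01-01')) AS years;   -- 0
```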
@@ -1686,16 +1686,17 @@ age('unit', startdate, enddate, [timezone])
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

-       - `microsecond` `microseconds` `us` `u`
-       - `millisecond` `milliseconds` `ms`
-       - `second` `seconds` `ss` `s`
-       - `minute` `minutes` `mi` `n`
-       - `hour` `hours` `hh` `h`
-       - `day` `days` `dd` `d`
-       - `week` `weeks` `wk` `ww`
-       - `month` `months` `mm` `m`
-       - `quarter` `quarters` `qq` `q`
-       - `year` `years` `yyyy` `yy`
+       - `nanosecond`, `nanoseconds`, `ns`
+       - `microsecond`, `microseconds`, `us`, `u`
+       - `millisecond`, `milliseconds`, `ms`
+       - `second`, `seconds`, `ss`, `s`
+       - `minute`, `minutes`, `mi`, `n`
+       - `hour`, `hours`, `hh`, `h`
+       - `day`, `days`, `dd`, `d`
+       - `week`, `weeks`, `wk`, `ww`
+       - `month`, `months`, `mm`, `m`
+       - `quarter`, `quarters`, `qq`, `q`
+       - `year`, `years`, `yyyy`, `yy`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
@@ -1763,16 +1764,17 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

-       - `microsecond` `microseconds` `us` `u`
-       - `millisecond` `milliseconds` `ms`
-       - `second` `seconds` `ss` `s`
-       - `minute` `minutes` `mi` `n`
-       - `hour` `hours` `hh` `h`
-       - `day` `days` `dd` `d`
-       - `week` `weeks` `wk` `ww`
-       - `month` `months` `mm` `m`
-       - `quarter` `quarters` `qq` `q`
-       - `year` `years` `yyyy` `yy`
+       - `nanosecond`, `nanoseconds`, `ns`
+       - `microsecond`, `microseconds`, `us`, `u`
+       - `millisecond`, `milliseconds`, `ms`
+       - `second`, `seconds`, `ss`, `s`
+       - `minute`, `minutes`, `mi`, `n`
+       - `hour`, `hours`, `hh`, `h`
+       - `day`, `days`, `dd`, `d`
+       - `week`, `weeks`, `wk`, `ww`
+       - `month`, `months`, `mm`, `m`
+       - `quarter`, `quarters`, `qq`, `q`
+       - `year`, `years`, `yyyy`, `yy`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
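For contrast with `age` above: `date_diff` counts crossed unit boundaries rather than complete units, so the same dates give different month/year answers (a sketch):

```sql
SELECT
    dateDiff('day',   toDate('2021-12-29'), toDate('2022-01-01')) AS days,    -- 3
    dateDiff('month', toDate('2021-12-29'), toDate('2022-01-01')) AS months,  -- 1 (a month boundary is crossed)
    dateDiff('year',  toDate('2021-12-29'), toDate('2022-01-01')) AS years;   -- 1 (a year boundary is crossed)
```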
@@ -543,12 +543,64 @@ You can get similar result by using the [ternary operator](../../sql-reference/f

Returns 1 if the Float32 or Float64 argument is NaN, otherwise this function returns 0.

-## hasColumnInTable(\[‘hostname’\[, ‘username’\[, ‘password’\]\],\] ‘database’, ‘table’, ‘column’)
+## hasColumnInTable

Given the database name, the table name, and the column name as constant strings, returns 1 if the given column exists, otherwise 0.

**Syntax**

```sql
hasColumnInTable([‘hostname’[, ‘username’[, ‘password’]],] ‘database’, ‘table’, ‘column’)
```

**Parameters**

- `database` : name of the database. [String literal](../syntax#syntax-string-literal)
- `table` : name of the table. [String literal](../syntax#syntax-string-literal)
- `column` : name of the column. [String literal](../syntax#syntax-string-literal)
- `hostname` : remote server name to perform the check on. [String literal](../syntax#syntax-string-literal)
- `username` : username for the remote server. [String literal](../syntax#syntax-string-literal)
- `password` : password for the remote server. [String literal](../syntax#syntax-string-literal)

**Returned value**

- `1` if the given column exists.
- `0`, otherwise.

**Implementation details**

If the parameter `hostname` is given, the check is performed on a remote server.
If the table does not exist, an exception is thrown.
For elements in a nested data structure, the function checks for the existence of a column. For the nested data structure itself, the function returns 0.

**Example**

Query:

```sql
SELECT hasColumnInTable('system','metrics','metric')
```

```response
1
```

```sql
SELECT hasColumnInTable('system','metrics','non-existing_column')
```

```response
0
```

## hasThreadFuzzer

Returns whether Thread Fuzzer is effective. It can be used in tests to prevent runs from being too long.

**Syntax**

```sql
hasThreadFuzzer();
```

## bar

Builds a bar chart.
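Since the `bar` section is cut off here, a small hedged sketch of its usual signature `bar(value, min, max, width)`:

```sql
-- Scale values 0..5 onto a bar of width 10
SELECT number AS x, bar(x, 0, 5, 10) AS chart
FROM numbers(6);
```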
@@ -99,7 +99,7 @@ Alias: `OCTET_LENGTH`

Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

Alias:
-- `CHAR_LENGTH``
+- `CHAR_LENGTH`
- `CHARACTER_LENGTH`

## leftPad
@@ -481,9 +481,9 @@ Alias: `haystack NOT ILIKE pattern` (operator)

## ngramDistance

-Calculates the 4-gram distance between a `haystack` string and a `needle` string. For that, it counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns a Float32 between 0 and 1. The smaller the result is, the more strings are similar to each other. Throws an exception if constant `needle` or `haystack` arguments are more than 32Kb in size. If any of non-constant `haystack` or `needle` arguments is more than 32Kb in size, the distance is always 1.
+Calculates the 4-gram distance between a `haystack` string and a `needle` string. For this, it counts the symmetric difference between two multisets of 4-grams and normalizes it by the sum of their cardinalities. Returns a [Float32](../../sql-reference/data-types/float.md/#float32-float64) between 0 and 1. The smaller the result is, the more similar the strings are to each other.

-Functions `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseInsensitiveUTF8` provide case-insensitive and/or UTF-8 variants of this function.
+Functions [`ngramDistanceCaseInsensitive`](#ngramdistancecaseinsensitive), [`ngramDistanceUTF8`](#ngramdistanceutf8), [`ngramDistanceCaseInsensitiveUTF8`](#ngramdistancecaseinsensitiveutf8) provide case-insensitive and/or UTF-8 variants of this function.

**Syntax**

@@ -491,15 +491,170 @@ Functions `ngramDistanceCaseInsensitive, ngramDistanceUTF8, ngramDistanceCaseIns
ngramDistance(haystack, needle)
```

**Parameters**

- `haystack`: First comparison string. [String literal](../syntax#string)
- `needle`: Second comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the similarity between the two strings. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

**Implementation details**

This function will throw an exception if constant `needle` or `haystack` arguments are more than 32Kb in size. If any non-constant `haystack` or `needle` arguments are more than 32Kb in size, then the distance is always 1.

**Examples**

The more similar two strings are to each other, the closer the result will be to 0 (identical).

Query:

```sql
SELECT ngramDistance('ClickHouse','ClickHouse!');
```

Result:

```response
0.06666667
```

The less similar two strings are to each other, the larger the result will be.

Query:

```sql
SELECT ngramDistance('ClickHouse','House');
```

Result:

```response
0.5555556
```
## ngramDistanceCaseInsensitive

Provides a case-insensitive variant of [ngramDistance](#ngramdistance).

**Syntax**

```sql
ngramDistanceCaseInsensitive(haystack, needle)
```

**Parameters**

- `haystack`: First comparison string. [String literal](../syntax#string)
- `needle`: Second comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the similarity between the two strings. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

**Examples**

With [ngramDistance](#ngramdistance), differences in case will affect the similarity value:

Query:

```sql
SELECT ngramDistance('ClickHouse','clickhouse');
```

Result:

```response
0.71428573
```

With [ngramDistanceCaseInsensitive](#ngramdistancecaseinsensitive), case is ignored, so two strings differing only in case are treated as identical and return a low similarity value:

Query:

```sql
SELECT ngramDistanceCaseInsensitive('ClickHouse','clickhouse');
```

Result:

```response
0
```
## ngramDistanceUTF8

Provides a UTF-8 variant of [ngramDistance](#ngramdistance). Assumes that `needle` and `haystack` strings are UTF-8 encoded strings.

**Syntax**

```sql
ngramDistanceUTF8(haystack, needle)
```

**Parameters**

- `haystack`: First UTF-8 encoded comparison string. [String literal](../syntax#string)
- `needle`: Second UTF-8 encoded comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the similarity between the two strings. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

**Example**

Query:

```sql
SELECT ngramDistanceUTF8('abcde','cde');
```

Result:

```response
0.5
```
## ngramDistanceCaseInsensitiveUTF8

Provides a case-insensitive variant of [ngramDistanceUTF8](#ngramdistanceutf8).

**Syntax**

```sql
ngramDistanceCaseInsensitiveUTF8(haystack, needle)
```

**Parameters**

- `haystack`: First UTF-8 encoded comparison string. [String literal](../syntax#string)
- `needle`: Second UTF-8 encoded comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the similarity between the two strings. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

**Example**

Query:

```sql
SELECT ngramDistanceCaseInsensitiveUTF8('abcde','CDE');
```

Result:

```response
0.5
```
## ngramSearch

-Like `ngramDistance` but calculates the non-symmetric difference between a `needle` string and a `haystack` string, i.e. the number of n-grams from `needle` minus the common number of n-grams normalized by the number of `needle` n-grams. Returns a Float32 between 0 and 1. The bigger the result is, the more likely `needle` is in the `haystack`. This function is useful for fuzzy string search. Also see function `soundex`.
+Like `ngramDistance` but calculates the non-symmetric difference between a `needle` string and a `haystack` string, i.e. the number of n-grams from the needle minus the common number of n-grams normalized by the number of `needle` n-grams. Returns a [Float32](../../sql-reference/data-types/float.md/#float32-float64) between 0 and 1. The bigger the result is, the more likely `needle` is in the `haystack`. This function is useful for fuzzy string search. Also see function [`soundex`](../../sql-reference/functions/string-functions#soundex).

-Functions `ngramSearchCaseInsensitive, ngramSearchUTF8, ngramSearchCaseInsensitiveUTF8` provide case-insensitive and/or UTF-8 variants of this function.
-
-:::note
-The UTF-8 variants use the 3-gram distance. These are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With UTF-8 case-insensitive format we do not use fair `tolower` function – we zero the 5-th bit (starting from zero) of each codepoint byte and first bit of zeroth byte if bytes more than one – this works for Latin and mostly for all Cyrillic letters.
-:::
+Functions [`ngramSearchCaseInsensitive`](#ngramsearchcaseinsensitive), [`ngramSearchUTF8`](#ngramsearchutf8), [`ngramSearchCaseInsensitiveUTF8`](#ngramsearchcaseinsensitiveutf8) provide case-insensitive and/or UTF-8 variants of this function.

**Syntax**

@@ -507,6 +662,140 @@ The UTF-8 variants use the 3-gram distance. These are not perfectly fair n-gram
ngramSearch(haystack, needle)
```

**Parameters**

- `haystack`: First comparison string. [String literal](../syntax#string)
- `needle`: Second comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the likelihood of the `needle` being in the `haystack`. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

**Implementation details**

:::note
The UTF-8 variants use the 3-gram distance. These are not perfectly fair n-gram distances. We use 2-byte hashes to hash n-grams and then calculate the (non-)symmetric difference between these hash tables – collisions may occur. With the UTF-8 case-insensitive format we do not use a fair `tolower` function – we zero the 5th bit (starting from zero) of each codepoint byte, and the first bit of the zeroth byte if there is more than one byte – this works for Latin and mostly for all Cyrillic letters.
:::

**Example**

Query:

```sql
SELECT ngramSearch('Hello World','World Hello');
```

Result:

```response
0.5
```
## ngramSearchCaseInsensitive

Provides a case-insensitive variant of [ngramSearch](#ngramsearch).

**Syntax**

```sql
ngramSearchCaseInsensitive(haystack, needle)
```

**Parameters**

- `haystack`: First comparison string. [String literal](../syntax#string)
- `needle`: Second comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the likelihood of the `needle` being in the `haystack`. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

The bigger the result is, the more likely `needle` is in the `haystack`.

**Example**

Query:

```sql
SELECT ngramSearchCaseInsensitive('Hello World','hello');
```

Result:

```response
1
```
## ngramSearchUTF8

Provides a UTF-8 variant of [ngramSearch](#ngramsearch) in which `needle` and `haystack` are assumed to be UTF-8 encoded strings.

**Syntax**

```sql
ngramSearchUTF8(haystack, needle)
```

**Parameters**

- `haystack`: First UTF-8 encoded comparison string. [String literal](../syntax#string)
- `needle`: Second UTF-8 encoded comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the likelihood of the `needle` being in the `haystack`. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

The bigger the result is, the more likely `needle` is in the `haystack`.

**Example**

Query:

```sql
SELECT ngramSearchUTF8('абвгдеёжз', 'гдеёзд');
```

Result:

```response
0.5
```
## ngramSearchCaseInsensitiveUTF8

Provides a case-insensitive variant of [ngramSearchUTF8](#ngramsearchutf8) in which `needle` and `haystack` are assumed to be UTF-8 encoded strings.

**Syntax**

```sql
ngramSearchCaseInsensitiveUTF8(haystack, needle)
```

**Parameters**

- `haystack`: First UTF-8 encoded comparison string. [String literal](../syntax#string)
- `needle`: Second UTF-8 encoded comparison string. [String literal](../syntax#string)

**Returned value**

- Value between 0 and 1 representing the likelihood of the `needle` being in the `haystack`. [Float32](../../sql-reference/data-types/float.md/#float32-float64)

The bigger the result is, the more likely `needle` is in the `haystack`.

**Example**

Query:

```sql
SELECT ngramSearchCaseInsensitiveUTF8('абвГДЕёжз', 'АбвгдЕЁжз');
```

Result:

```response
0.57142854
```
## countSubstrings

Returns how often substring `needle` occurs in string `haystack`.
@@ -678,8 +967,10 @@ Type: `UInt8`.

**Examples**

Query:

``` sql
-SELECT hasSubsequence('garbage', 'arg') ;
+SELECT hasSubsequence('garbage', 'arg');
```

Result:
@@ -694,10 +985,263 @@ Result:

Like [hasSubsequence](#hasSubsequence) but searches case-insensitively.

**Syntax**

``` sql
hasSubsequenceCaseInsensitive(haystack, needle)
```

**Arguments**

- `haystack` — String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched. [String](../../sql-reference/syntax.md#syntax-string-literal).

**Returned values**

- 1, if needle is a subsequence of haystack.
- 0, otherwise.

Type: `UInt8`.

**Examples**

Query:

``` sql
SELECT hasSubsequenceCaseInsensitive('garbage', 'ARG');
```

Result:

``` text
┌─hasSubsequenceCaseInsensitive('garbage', 'ARG')─┐
│                                               1 │
└─────────────────────────────────────────────────┘
```
## hasSubsequenceUTF8

Like [hasSubsequence](#hasSubsequence) but assumes `haystack` and `needle` are UTF-8 encoded strings.

**Syntax**

``` sql
hasSubsequenceUTF8(haystack, needle)
```

**Arguments**

- `haystack` — String in which the search is performed. UTF-8 encoded [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched. UTF-8 encoded [String](../../sql-reference/syntax.md#syntax-string-literal).

**Returned values**

- 1, if needle is a subsequence of haystack.
- 0, otherwise.

Type: `UInt8`.

**Examples**

Query:

``` sql
SELECT hasSubsequenceUTF8('ClickHouse - столбцовая система управления базами данных', 'система');
```

Result:

``` text
┌─hasSubsequenceUTF8('ClickHouse - столбцовая система управления базами данных', 'система')─┐
│                                                                                         1 │
└───────────────────────────────────────────────────────────────────────────────────────────┘
```
## hasSubsequenceCaseInsensitiveUTF8

Like [hasSubsequenceUTF8](#hasSubsequenceUTF8) but searches case-insensitively.

**Syntax**

``` sql
hasSubsequenceCaseInsensitiveUTF8(haystack, needle)
```

**Arguments**

- `haystack` — String in which the search is performed. UTF-8 encoded [String](../../sql-reference/syntax.md#syntax-string-literal).
- `needle` — Subsequence to be searched. UTF-8 encoded [String](../../sql-reference/syntax.md#syntax-string-literal).

**Returned values**

- 1, if needle is a subsequence of haystack.
- 0, otherwise.

Type: `UInt8`.

**Examples**

Query:

``` sql
SELECT hasSubsequenceCaseInsensitiveUTF8('ClickHouse - столбцовая система управления базами данных', 'СИСТЕМА');
```

Result:

``` text
┌─hasSubsequenceCaseInsensitiveUTF8('ClickHouse - столбцовая система управления базами данных', 'СИСТЕМА')─┐
│                                                                                                        1 │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## hasToken

Returns 1 if a given token is present in a haystack, or 0 otherwise.

**Syntax**

```sql
hasToken(haystack, token)
```

**Parameters**

- `haystack`: String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `token`: Maximal length substring between two non alphanumeric ASCII characters (or boundaries of haystack).

**Returned value**

- 1, if the token is present in the haystack.
- 0, if the token is not present.

**Implementation details**

Token must be a constant string. Supported by tokenbf_v1 index specialization.

**Example**

Query:

```sql
SELECT hasToken('Hello World','Hello');
```

```response
1
```
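A hedged sketch of the `tokenbf_v1` interplay mentioned under the implementation details (table name and index parameters are illustrative):

```sql
CREATE TABLE logs
(
    id UInt64,
    message String,
    -- tokenbf_v1(bloom filter size in bytes, number of hash functions, seed)
    INDEX message_tokens message TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY id;

-- With a constant token, hasToken can use the index to skip granules:
SELECT count() FROM logs WHERE hasToken(message, 'error');
```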
## hasTokenOrNull

Returns 1 if a given token is present, 0 if not present, and null if the token is ill-formed.

**Syntax**

```sql
hasTokenOrNull(haystack, token)
```

**Parameters**

- `haystack`: String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `token`: Maximal length substring between two non alphanumeric ASCII characters (or boundaries of haystack).

**Returned value**

- 1, if the token is present in the haystack.
- 0, if the token is not present in the haystack.
- null, if the token is ill-formed.

**Implementation details**

Token must be a constant string. Supported by tokenbf_v1 index specialization.

**Example**

Where `hasToken` would throw an error for an ill-formed token, `hasTokenOrNull` returns `null` for an ill-formed token.

Query:

```sql
SELECT hasTokenOrNull('Hello World','Hello,World');
```

```response
null
```
## hasTokenCaseInsensitive

Returns 1 if a given token is present in a haystack, 0 otherwise. Ignores case.

**Syntax**

```sql
hasTokenCaseInsensitive(haystack, token)
```

**Parameters**

- `haystack`: String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `token`: Maximal length substring between two non alphanumeric ASCII characters (or boundaries of haystack).

**Returned value**

- 1, if the token is present in the haystack.
- 0, otherwise.

**Implementation details**

Token must be a constant string. Supported by tokenbf_v1 index specialization.

**Example**

Query:

```sql
SELECT hasTokenCaseInsensitive('Hello World','hello');
```

```response
1
```
## hasTokenCaseInsensitiveOrNull

Returns 1 if a given token is present in a haystack, 0 otherwise. Ignores case and returns null if the token is ill-formed.

**Syntax**

```sql
hasTokenCaseInsensitiveOrNull(haystack, token)
```

**Parameters**

- `haystack`: String in which the search is performed. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `token`: Maximal length substring between two non alphanumeric ASCII characters (or boundaries of haystack).

**Returned value**

- 1, if the token is present in the haystack.
- 0, if the token is not present.
- null, if the token is ill-formed.

**Implementation details**

Token must be a constant string. Supported by tokenbf_v1 index specialization.

**Example**

Where `hasTokenCaseInsensitive` would throw an error for an ill-formed token, `hasTokenCaseInsensitiveOrNull` returns `null` for an ill-formed token.

Query:

```sql
SELECT hasTokenCaseInsensitiveOrNull('Hello World','hello,world');
```

```response
null
```
@@ -56,7 +56,9 @@ Entries for finished mutations are not deleted right away (the number of preserv

For non-replicated tables, all `ALTER` queries are performed synchronously. For replicated tables, the query just adds instructions for the appropriate actions to `ZooKeeper`, and the actions themselves are performed as soon as possible. However, the query can wait for these actions to be completed on all the replicas.

-For all `ALTER` queries, you can use the [alter_sync](/docs/en/operations/settings/settings.md/#alter-sync) setting to set up waiting.
+For `ALTER` queries that create mutations (including, but not limited to, `UPDATE`, `DELETE`, `MATERIALIZE INDEX`, `MATERIALIZE PROJECTION`, `MATERIALIZE COLUMN`, `APPLY DELETED MASK`, `CLEAR STATISTIC`, `MATERIALIZE STATISTIC`), the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
+
+For other `ALTER` queries, which only modify metadata, you can use the [alter_sync](/docs/en/operations/settings/settings.md/#alter-sync) setting to set up waiting.
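A sketch of the distinction drawn above (the table name is hypothetical; the setting values are illustrative):

```sql
-- Creates a mutation: waiting is governed by mutations_sync
ALTER TABLE hits DELETE WHERE event_date < '2020-01-01'
SETTINGS mutations_sync = 1;  -- wait for the current replica

-- Metadata-only change: waiting is governed by alter_sync
ALTER TABLE hits ADD COLUMN referrer String
SETTINGS alter_sync = 2;      -- wait for all replicas
```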
You can specify how long (in seconds) to wait for inactive replicas to execute all `ALTER` queries with the [replication_wait_for_inactive_replica_timeout](/docs/en/operations/settings/settings.md/#replication-wait-for-inactive-replica-timeout) setting.

@@ -64,8 +66,6 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
:::

-For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.

## Related content

- Blog: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse)
@@ -64,6 +64,14 @@ RELOAD FUNCTIONS [ON CLUSTER cluster_name]
RELOAD FUNCTION [ON CLUSTER cluster_name] function_name
```

+## RELOAD ASYNCHRONOUS METRICS
+
+Re-calculates all [asynchronous metrics](../../operations/system-tables/asynchronous_metrics.md). Since asynchronous metrics are periodically updated based on the setting [asynchronous_metrics_update_period_s](../../operations/server-configuration-parameters/settings.md), updating them manually using this statement is typically not necessary.
+
+```sql
+RELOAD ASYNCHRONOUS METRICS [ON CLUSTER cluster_name]
+```
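A hedged usage sketch (in queries the statement is issued with the `SYSTEM` prefix; `Uptime` is one of the asynchronous metrics):

```sql
SYSTEM RELOAD ASYNCHRONOUS METRICS;

SELECT metric, value
FROM system.asynchronous_metrics
WHERE metric = 'Uptime';
```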

## DROP DNS CACHE

Clears ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries).
@@ -2776,7 +2776,7 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
- 0 — row numbers are not output.
- 1 — row numbers are output.

-Default value: `0`.
+Default value: `1`.

**Example**
@@ -2798,7 +2798,7 @@ SELECT TOP 3 name, value FROM system.settings;
```
### output_format_pretty_color {#output_format_pretty_color}

Enables/disables ANSI escape sequences in Pretty formats.

Possible values:
@@ -4123,7 +4123,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca
## session_timezone {#session_timezone}

Sets the default time zone (session_timezone) for the current session instead of the [server time zone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). That is, all DateTime/DateTime64 values for which no time zone is set explicitly are interpreted as belonging to the specified zone.
With the setting value `''` (empty string), it matches the server time zone.

The functions `timeZone()` and `serverTimezone()` return the time zone of the current session and of the server, respectively.
@@ -476,7 +476,7 @@ FROM

- `r1` - the number of unique visitors during 2020-01-01 (the `cond1` condition).
- `r2` - the number of unique visitors during the period between 2020-01-01 and 2020-01-02 (the `cond1` and `cond2` conditions).
-- `r3` - the number of unique visitors during the period between 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).
+- `r3` - the number of unique visitors during the period on 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).

## uniqUpTo(N)(x) {#uniquptonx}
@@ -120,7 +120,7 @@ FROM dt
- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
- [The `date_time_input_format` setting](../../operations/settings/index.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/index.md)
+- [The `date_time_output_format` setting](../../operations/settings/index.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)
|
@ -627,7 +627,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d

## age

Calculates the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated with a precision of 1 microsecond.
Calculates the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated with a precision of 1 nanosecond.
For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, 0 years for the `year` unit.
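
That example can be verified directly (a minimal sketch, shown for the `day` unit):

```sql
SELECT age('day', toDate('2021-12-29'), toDate('2022-01-01')); -- returns 3
```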

**Syntax**

@ -641,6 +641,7 @@ age('unit', startdate, enddate, [timezone])

- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values:

- `nanosecond` (possible abbreviations: `ns`)
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
@ -716,6 +717,7 @@ date_diff('unit', startdate, enddate, [timezone])

- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values:

- `nanosecond` (possible abbreviations: `ns`)
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
@ -472,7 +472,7 @@ FROM

- `r1` - the number of unique visitors to the site during 2020-01-01 (the `cond1` condition).
- `r2` - the number of unique visitors to the site during the specific period between 2020-01-01 and 2020-01-02 (the `cond1` and `cond2` conditions).
- `r3` - the number of unique visitors to the site during the specific period between 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).
- `r3` - the number of unique visitors to the site during 2020-01-01 and 2020-01-03 (the `cond1` and `cond3` conditions).

## uniqUpTo(N)(x) {#uniquptonx}
@ -643,6 +643,7 @@ date_diff('unit', startdate, enddate, [timezone])

- `unit` — the unit of time corresponding to `value`. Type: [String](../../sql-reference/data-types/string.md).
Possible values:

- `nanosecond`
- `microsecond`
- `millisecond`
- `second`
@ -30,10 +30,6 @@ conflicts:

contents:
- src: root/usr/lib/debug/usr/bin/clickhouse.debug
  dst: /usr/lib/debug/usr/bin/clickhouse.debug
- src: root/usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
  dst: /usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
- src: root/usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
  dst: /usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-common-static-dbg/AUTHORS
@ -36,10 +36,6 @@ contents:

  dst: /usr/bin/clickhouse
- src: root/usr/bin/clickhouse-extract-from-config
  dst: /usr/bin/clickhouse-extract-from-config
- src: root/usr/bin/clickhouse-library-bridge
  dst: /usr/bin/clickhouse-library-bridge
- src: root/usr/bin/clickhouse-odbc-bridge
  dst: /usr/bin/clickhouse-odbc-bridge
- src: root/usr/share/bash-completion/completions
  dst: /usr/share/bash-completion/completions
- src: root/usr/share/clickhouse
35 packages/clickhouse-library-bridge.yaml Normal file
@ -0,0 +1,35 @@

# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-library-bridge"
description: |
  ClickHouse Library Bridge is a separate process for loading libraries for the 'library' dictionary sources and the CatBoost library.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.

# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse

# Package specific content
contents:
- src: root/usr/bin/clickhouse-library-bridge
  dst: /usr/bin/clickhouse-library-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-library-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-library-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-library-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-library-bridge/README.md
35 packages/clickhouse-odbc-bridge.yaml Normal file
@ -0,0 +1,35 @@

# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-odbc-bridge"
description: |
  ClickHouse ODBC Bridge is a separate process for loading ODBC drivers and interacting with external databases using the ODBC protocol.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.

# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse

# Package specific content
contents:
- src: root/usr/bin/clickhouse-odbc-bridge
  dst: /usr/bin/clickhouse-odbc-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-odbc-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-odbc-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/README.md
@ -17,12 +17,13 @@

#include <Access/AccessControl.h>

#include <Common/config_version.h>
#include <Common/Exception.h>
#include <Common/formatReadable.h>
#include <Common/TerminalSize.h>
#include <Common/Config/ConfigProcessor.h>
#include <Common/Config/getClientConfigPath.h>
#include <Common/CurrentThread.h>
#include <Common/Exception.h>
#include <Common/TerminalSize.h>
#include <Common/config_version.h>
#include <Common/formatReadable.h>

#include <Columns/ColumnString.h>
#include <Poco/Util/Application.h>
@ -237,7 +237,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)

ASTPtr res = parseQueryAndMovePosition(
parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);

std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
std::unique_ptr<ReadBuffer> insert_query_payload;
/// If the query is INSERT ... VALUES, then we will try to parse the data.
if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{
@ -24,9 +24,4 @@ target_link_libraries(clickhouse-library-bridge PRIVATE

set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)

if (SPLIT_DEBUG_SYMBOLS)
clickhouse_split_debug_symbols(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-library-bridge)
else()
clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
@ -35,7 +35,7 @@ public:

ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib

private:
std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder = nullptr;
std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder;
Container strings_holder;
};
@ -30,12 +30,7 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE

set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro)

if (SPLIT_DEBUG_SYMBOLS)
clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge)
else()
clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

if(ENABLE_TESTS)
add_subdirectory(tests)
@ -12,6 +12,7 @@

#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/NumberParser.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/quoteString.h>
@ -5,7 +5,6 @@

#if USE_ODBC

#include <Interpreters/Context_fwd.h>
#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandler.h>
#include <Poco/Logger.h>
@ -96,7 +96,7 @@

<to>https://{bucket}.s3.amazonaws.com</to>
</s3>
<gs>
<to>https://{bucket}.storage.googleapis.com</to>
<to>https://storage.googleapis.com/{bucket}</to>
</gs>
<oss>
<to>https://{bucket}.oss.aliyuncs.com</to>
@ -16,6 +16,8 @@

#include <IO/ReadBufferFromString.h>
#include <Poco/UUIDGenerator.h>
#include <base/insertAtEnd.h>

#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>

namespace fs = std::filesystem;
@ -1,6 +1,8 @@

#include <Access/AccessRights.h>
#include <Common/logger_useful.h>
#include <base/sort.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>

#include <boost/container/small_vector.hpp>
#include <boost/range/adaptor/map.hpp>
#include <unordered_map>
@ -115,34 +115,34 @@ public:

void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
{
this->data(place).add(*columns[0], row_num, arena);
data(place).add(*columns[0], row_num, arena);
}

void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
{
this->data(place).addManyDefaults(*columns[0], 0, arena);
data(place).addManyDefaults(*columns[0], 0, arena);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
this->data(place).add(this->data(rhs), arena);
data(place).add(data(rhs), arena);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf, *serialization);
data(place).write(buf, *serialization);
}

void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
this->data(place).read(buf, *serialization, arena);
data(place).read(buf, *serialization, arena);
}

bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
this->data(place).insertResultInto(to);
data(place).insertResultInto(to);
}
};
@ -1,11 +1,11 @@

#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/Combinators/AggregateFunctionCombinatorFactory.h>

#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>

static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;
@ -559,7 +559,7 @@ public:

ptr = ptrs[row_num];
}

this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
}

void addManyDefaults(

@ -572,7 +572,7 @@ public:

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
this->data(place).merge(this->data(rhs), arena);
data(place).merge(data(rhs), arena);
}

void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override

@ -590,7 +590,7 @@ public:

auto & array = assert_cast<ColumnArray &>(to);
auto & str = assert_cast<ColumnString &>(array.getData());

this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);

array.getOffsets().push_back(str.size());
}
@ -89,10 +89,10 @@ struct GroupArraySamplerData

chassert(lim != 0);

/// With a large number of values, we will generate random numbers several times slower.
if (lim <= static_cast<UInt64>(rng.max()))
if (lim <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % lim;
else
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
}

void randomShuffle()
@ -242,7 +242,7 @@ public:

{
Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type);
Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type);
this->data(place).add(x, y, arena);
data(place).add(x, y, arena);
}

Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const

@ -264,25 +264,25 @@ public:

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
auto & a = this->data(place);
const auto & b = this->data(rhs);
auto & a = data(place);
const auto & b = data(rhs);

a.merge(b, arena);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
this->data(place).read(buf, arena);
data(place).read(buf, arena);
}

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
{
auto res = this->data(place).getResult(total_buckets, arena);
auto res = data(place).getResult(total_buckets, arena);

auto & col = assert_cast<ColumnArray &>(to);
auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
@ -205,35 +205,35 @@ public:

UInt8 is_second = columns[1]->getUInt(row_num);

if (is_second)
this->data(place).addY(value, arena);
data(place).addY(value, arena);
else
this->data(place).addX(value, arena);
data(place).addX(value, arena);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
auto & a = this->data(place);
const auto & b = this->data(rhs);
auto & a = data(place);
const auto & b = data(rhs);

a.merge(b, arena);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
this->data(place).read(buf, arena);
data(place).read(buf, arena);
}

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
if (!this->data(place).size_x || !this->data(place).size_y)
if (!data(place).size_x || !data(place).size_y)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());

auto [u_statistic, p_value] = this->data(place).getResult(alternative, continuity_correction);
auto [u_statistic, p_value] = data(place).getResult(alternative, continuity_correction);

/// Because p-value is a probability.
p_value = std::min(1.0, std::max(0.0, p_value));
@ -66,31 +66,31 @@ public:

{
Float64 new_x = columns[0]->getFloat64(row_num);
Float64 new_y = columns[1]->getFloat64(row_num);
this->data(place).addX(new_x, arena);
this->data(place).addY(new_y, arena);
data(place).addX(new_x, arena);
data(place).addY(new_y, arena);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
auto & a = this->data(place);
const auto & b = this->data(rhs);
auto & a = data(place);
const auto & b = data(rhs);

a.merge(b, arena);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
this->data(place).read(buf, arena);
data(place).read(buf, arena);
}

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
auto answer = this->data(place).getResult();
auto answer = data(place).getResult();

auto & column = static_cast<ColumnVector<Float64> &>(to);
column.getData().push_back(answer);
@ -102,24 +102,24 @@ public:

auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
if (event)
{
this->data(place).add(i);
data(place).add(i);
}
}
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).merge(this->data(rhs));
data(place).merge(data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
data(place).deserialize(buf);
}

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

@ -130,13 +130,13 @@ public:

ColumnArray::Offset current_offset = data_to.size();
data_to.resize(current_offset + events_size);

const bool first_flag = this->data(place).events.test(0);
const bool first_flag = data(place).events.test(0);
data_to[current_offset] = first_flag;
++current_offset;

for (size_t i = 1; i < events_size; ++i)
{
data_to[current_offset] = (first_flag && this->data(place).events.test(i));
data_to[current_offset] = (first_flag && data(place).events.test(i));
++current_offset;
}
@ -123,22 +123,22 @@ public:

Float64 x = columns[0]->getFloat64(row_num);
Float64 y = columns[1]->getFloat64(row_num);

this->data(place).add(x, y);
data(place).add(x, y);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).merge(this->data(rhs));
data(place).merge(data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
data(place).deserialize(buf);
}

static DataTypePtr createResultType()

@ -168,8 +168,8 @@ public:

IColumn & to,
Arena *) const override
{
Float64 k = this->data(place).getK();
Float64 b = this->data(place).getB(k);
Float64 k = data(place).getK();
Float64 b = data(place).getB(k);

auto & col_tuple = assert_cast<ColumnTuple &>(to);
auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0));
@ -120,7 +120,7 @@ public:

void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
{
this->data(place).add(*columns[0], row_num, arena);
data(place).add(*columns[0], row_num, arena);
}

void addBatchSinglePlace(

@ -131,7 +131,7 @@ public:

Arena * arena,
ssize_t if_argument_pos) const override
{
if (this->data(place).isNull())
if (data(place).isNull())
return;
IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace(
row_begin, row_end, place, columns, arena, if_argument_pos);

@ -146,7 +146,7 @@ public:

Arena * arena,
ssize_t if_argument_pos) const override
{
if (this->data(place).isNull())
if (data(place).isNull())
return;
IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull(
row_begin, row_end, place, columns, null_map, arena, if_argument_pos);

@ -154,29 +154,29 @@ public:

void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
{
this->data(place).add(*columns[0], 0, arena);
data(place).add(*columns[0], 0, arena);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
this->data(place).add(this->data(rhs), arena);
data(place).add(data(rhs), arena);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf, *serialization);
data(place).write(buf, *serialization);
}

void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
this->data(place).read(buf, *serialization, arena);
data(place).read(buf, *serialization, arena);
}

bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
this->data(place).insertResultInto(to);
data(place).insertResultInto(to);
}
};
@ -150,13 +150,13 @@ private:

Float64 getResult(ConstAggregateDataPtr __restrict place) const
{
const auto & data = this->data(place);
const auto & dt = data(place);
switch (kind)
{
case VarKind::varSampStable: return getVarSamp(data.m2, data.count);
case VarKind::stddevSampStable: return getStddevSamp(data.m2, data.count);
case VarKind::varPopStable: return getVarPop(data.m2, data.count);
case VarKind::stddevPopStable: return getStddevPop(data.m2, data.count);
case VarKind::varSampStable: return getVarSamp(dt.m2, dt.count);
case VarKind::stddevSampStable: return getStddevSamp(dt.m2, dt.count);
case VarKind::varPopStable: return getVarPop(dt.m2, dt.count);
case VarKind::stddevPopStable: return getStddevPop(dt.m2, dt.count);
}
}
@ -182,22 +182,22 @@ public:

void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{
this->data(place).update(*columns[0], row_num);
data(place).update(*columns[0], row_num);
}

void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).mergeWith(this->data(rhs));
data(place).mergeWith(data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
data(place).deserialize(buf);
}

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@ -491,7 +491,7 @@ public:

std::vector<const UInt8 *> nullable_filters;
const IColumn * nested_columns[number_of_arguments];

std::unique_ptr<UInt8[]> final_flags = nullptr;
std::unique_ptr<UInt8[]> final_flags;
const UInt8 * final_flags_ptr = nullptr;

if (if_argument_pos >= 0)
@ -1,17 +1,18 @@

#pragma once

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnsNumber.h>
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Core/IResolvedFunction.h>
#include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h>
#include <base/types.h>
#include <Common/Exception.h>
#include <Common/ThreadPool_fwd.h>
#include <Core/IResolvedFunction.h>

#include "config.h"

@ -46,13 +47,6 @@ class IWindowFunction;

using DataTypePtr = std::shared_ptr<const IDataType>;
using DataTypes = std::vector<DataTypePtr>;

using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;

class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;

struct AggregateFunctionProperties;

/** Aggregate functions interface.
14 src/AggregateFunctions/IAggregateFunction_fwd.h Normal file
@ -0,0 +1,14 @@

#pragma once

#include <memory>
#include <vector>

namespace DB
{
using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;

class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
}
@ -258,10 +258,10 @@ private:

chassert(limit > 0);

/// With a large number of values, we will generate random numbers several times slower.
if (limit <= static_cast<UInt64>(rng.max()))
if (limit <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % limit;
else
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
}

void sortIfNeeded()
@ -579,7 +579,7 @@ std::optional<size_t> SingleValueDataFixed<T>::getGreatestIndexNotNullIf(

return std::nullopt;

for (size_t i = index + 1; i < row_end; i++)
if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] < vec[index]))
if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] > vec[index]))
index = i;
return {index};
}
@ -1,14 +1,12 @@

#include <Analyzer/ArrayJoinNode.h>

#include <Analyzer/ColumnNode.h>
#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTExpressionList.h>

#include <Analyzer/Utils.h>
#include <Analyzer/ColumnNode.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Common/assert_cast.h>

namespace DB
{
@ -1,14 +1,12 @@

#include <Analyzer/ColumnNode.h>

#include <Common/SipHash.h>

#include <Analyzer/TableNode.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTIdentifier.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>

#include <Analyzer/TableNode.h>

namespace DB
{
@ -1,16 +1,14 @@

#include <Analyzer/JoinNode.h>
#include <Analyzer/ListNode.h>

#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTTablesInSelectQuery.h>

#include <Analyzer/Utils.h>
#include <Common/assert_cast.h>

namespace DB
{
@ -1940,8 +1940,7 @@ std::vector<String> QueryAnalyzer::collectIdentifierTypoHints(const Identifier &

for (const auto & valid_identifier : valid_identifiers)
prompting_strings.push_back(valid_identifier.getFullName());

NamePrompter<1> prompter;
return prompter.getHints(unresolved_identifier.getFullName(), prompting_strings);
return NamePrompter<1>::getHints(unresolved_identifier.getFullName(), prompting_strings);
}

/** Wrap expression node in tuple element function calls for nested paths.
@ -444,8 +444,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express

nulls_sort_direction = order_by_element.nulls_direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING;

std::shared_ptr<Collator> collator;
if (order_by_element.collation)
collator = std::make_shared<Collator>(order_by_element.collation->as<ASTLiteral &>().value.get<String &>());
if (order_by_element.getCollation())
collator = std::make_shared<Collator>(order_by_element.getCollation()->as<ASTLiteral &>().value.get<String &>());

const auto & sort_expression_ast = order_by_element.children.at(0);
auto sort_expression = buildExpression(sort_expression_ast, context);

@ -455,12 +455,12 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express

std::move(collator),
order_by_element.with_fill);

if (order_by_element.fill_from)
sort_node->getFillFrom() = buildExpression(order_by_element.fill_from, context);
if (order_by_element.fill_to)
sort_node->getFillTo() = buildExpression(order_by_element.fill_to, context);
if (order_by_element.fill_step)
sort_node->getFillStep() = buildExpression(order_by_element.fill_step, context);
if (order_by_element.getFillFrom())
sort_node->getFillFrom() = buildExpression(order_by_element.getFillFrom(), context);
if (order_by_element.getFillTo())
sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context);
if (order_by_element.getFillStep())
sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context);

list_node->getNodes().push_back(std::move(sort_node));
}

@ -558,7 +558,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression, co

}
else if (const auto * function = expression->as<ASTFunction>())
{
if (function->is_lambda_function)
if (function->is_lambda_function || isASTLambdaFunction(*function))
{
const auto & lambda_arguments_and_expression = function->arguments->as<ASTExpressionList &>().children;
auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as<ASTFunction &>();
@ -1,14 +1,15 @@

#pragma once

#include <Core/Settings.h>
#include <Core/Block.h>

#include <DataTypes/IDataType.h>

#include <QueryPipeline/SizeLimits.h>
#include <memory>

namespace DB
{

class IDataType;
using DataTypePtr = std::shared_ptr<const IDataType>;

class Set;
using SetPtr = std::shared_ptr<Set>;
@ -120,17 +120,18 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const

result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value();

result->with_fill = with_fill;
result->fill_from = hasFillFrom() ? getFillFrom()->toAST(options) : nullptr;
result->fill_to = hasFillTo() ? getFillTo()->toAST(options) : nullptr;
result->fill_step = hasFillStep() ? getFillStep()->toAST(options) : nullptr;
result->children.push_back(getExpression()->toAST(options));

if (collator)
{
result->children.push_back(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
result->collation = result->children.back();
}
result->setCollation(std::make_shared<ASTLiteral>(Field(collator->getLocale())));

result->with_fill = with_fill;
if (hasFillFrom())
result->setFillFrom(getFillFrom()->toAST(options));
if (hasFillTo())
result->setFillTo(getFillTo()->toAST(options));
if (hasFillStep())
result->setFillStep(getFillStep()->toAST(options));

return result;
}
@ -1,11 +1,9 @@

#include <Analyzer/WindowNode.h>

#include <Common/SipHash.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

#include <IO/WriteBufferFromString.h>
#include <Parsers/ASTWindowDefinition.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>

namespace DB
{
@ -1,22 +1,25 @@

#include <Access/Common/AccessEntityType.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromMemory.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupUtils.h>
#include <Backups/DDLAdjustingForBackupVisitor.h>
#include <Backups/IBackupCoordination.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <Access/Common/AccessEntityType.h>
#include <base/chrono_io.h>
#include <base/insertAtEnd.h>
#include <base/scope_guard.h>
#include <base/sleep.h>
#include <Common/escapeForFileName.h>

#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <base/scope_guard.h>

#include <filesystem>

namespace fs = std::filesystem;
@ -124,11 +124,12 @@ BackupReaderS3::BackupReaderS3(

bool allow_s3_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_)
const ContextPtr & context_,
bool is_internal_backup)
: BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderS3"))
, s3_uri(s3_uri_)
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{
auto & request_settings = s3_settings.request_settings;
request_settings.updateFromSettings(context_->getSettingsRef());

@ -214,11 +215,12 @@ BackupWriterS3::BackupWriterS3(

const String & storage_class_name,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_)
const ContextPtr & context_,
bool is_internal_backup)
: BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterS3"))
, s3_uri(s3_uri_)
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{
auto & request_settings = s3_settings.request_settings;
request_settings.updateFromSettings(context_->getSettingsRef());
@ -18,7 +18,15 @@ namespace DB

class BackupReaderS3 : public BackupReaderDefault
{
public:
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
BackupReaderS3(
const S3::URI & s3_uri_,
const String & access_key_id_,
const String & secret_access_key_,
bool allow_s3_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool is_internal_backup);
~BackupReaderS3() override;

bool fileExists(const String & file_name) override;

@ -41,7 +49,16 @@ private:

class BackupWriterS3 : public BackupWriterDefault
{
public:
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
BackupWriterS3(
const S3::URI & s3_uri_,
const String & access_key_id_,
const String & secret_access_key_,
bool allow_s3_native_copy,
const String & storage_class_name,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool is_internal_backup);
~BackupWriterS3() override;

bool fileExists(const String & file_name) override;
@ -27,6 +27,8 @@

#include <Common/scope_guard_safe.h>
#include <Common/ThreadPool.h>

#include <boost/range/adaptor/map.hpp>

namespace CurrentMetrics
{

@ -940,6 +942,7 @@ void BackupsWorker::doRestore(

backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
backup_open_params.read_settings = getReadSettingsForRestore(context);
backup_open_params.write_settings = getWriteSettingsForRestore(context);
backup_open_params.is_internal_backup = restore_settings.internal;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);

String current_database = context->getCurrentDatabase();
@ -24,6 +24,9 @@

#include <Common/escapeForFileName.h>
#include <base/insertAtEnd.h>
#include <boost/algorithm/string/join.hpp>

#include <boost/range/adaptor/map.hpp>

#include <filesystem>
#include <ranges>
@ -110,7 +110,8 @@ void registerBackupEngineS3(BackupFactory & factory)

params.allow_s3_native_copy,
params.read_settings,
params.write_settings,
params.context);
params.context,
params.is_internal_backup);

return std::make_unique<BackupImpl>(
params.backup_info,

@ -129,7 +130,8 @@ void registerBackupEngineS3(BackupFactory & factory)

params.s3_storage_class,
params.read_settings,
params.write_settings,
params.context);
params.context,
params.is_internal_backup);

return std::make_unique<BackupImpl>(
params.backup_info,
@ -207,11 +207,17 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g

max_entries = nested_pools.size();
}
else if (pool_mode == PoolMode::GET_ONE)
{
max_entries = 1;
}
else if (pool_mode == PoolMode::GET_MANY)
{
max_entries = settings.max_parallel_replicas;
}
else
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode");
}

if (!priority_func)
priority_func = makeGetPriorityFunc(settings);
@ -82,7 +82,7 @@ std::vector<Connection *> HedgedConnectionsFactory::getManyConnections(PoolMode

}
case PoolMode::GET_MANY:
{
max_entries = max_parallel_replicas;
max_entries = std::min(max_parallel_replicas, shuffled_pools.size());
break;
}
}
@ -158,7 +158,7 @@ private:

/// checking the number of requested replicas that are still in process).
size_t requested_connections_count = 0;

const size_t max_parallel_replicas = 0;
const size_t max_parallel_replicas = 1;
const bool skip_unavailable_shards = false;
};
@ -1,7 +1,13 @@

#include <Columns/ColumnAggregateFunction.h>

#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/AlignedBuffer.h>
#include <Common/Arena.h>
#include <Common/FieldVisitorToString.h>

@ -11,10 +17,6 @@

#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>

namespace DB

@ -109,6 +111,11 @@ ConstArenas concatArenas(const ConstArenas & array, ConstArenaPtr arena)

}

std::string ColumnAggregateFunction::getName() const
{
return "AggregateFunction(" + func->getName() + ")";
}

MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr column)
{
/** If the aggregate function returns an unfinalized/unfinished state,
@ -1,17 +1,9 @@

#pragma once

#include <AggregateFunctions/IAggregateFunction.h>

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/IColumn.h>
#include <Common/PODArray.h>

#include <Core/Field.h>

#include <IO/ReadBufferFromString.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>

#include <Functions/FunctionHelpers.h>
#include <Common/PODArray.h>

namespace DB
{

@ -26,6 +18,12 @@ using ArenaPtr = std::shared_ptr<Arena>;

using ConstArenaPtr = std::shared_ptr<const Arena>;
using ConstArenas = std::vector<ConstArenaPtr>;

class Context;
using ContextPtr = std::shared_ptr<const Context>;

struct ColumnWithTypeAndName;
using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;

/** Column of states of aggregate functions.
* Presented as an array of pointers to the states of aggregate functions (data).

@ -121,7 +119,7 @@ public:

/// This method is made static and receive MutableColumnPtr object to explicitly destroy it.
static MutableColumnPtr convertToValues(MutableColumnPtr column);

std::string getName() const override { return "AggregateFunction(" + func->getName() + ")"; }
std::string getName() const override;
const char * getFamilyName() const override { return "AggregateFunction"; }
TypeIndex getDataType() const override { return TypeIndex::AggregateFunction; }
@ -1,12 +1,10 @@

#pragma once

#include <Columns/IColumn.h>
#include <Core/Field.h>
#include <Core/Names.h>
#include <Columns/IColumn.h>
#include <Common/PODArray.h>
#include <Common/HashTable/HashMap.h>
#include <DataTypes/Serializations/JSONDataParser.h>
#include <DataTypes/Serializations/SubcolumnsTree.h>
#include <Common/PODArray.h>

#include <DataTypes/IDataType.h>
@ -346,7 +346,7 @@ ColumnPtr ColumnSparse::filter(const Filter & filt, ssize_t) const

}

auto res_values = values->filter(values_filter, values_result_size_hint);
return this->create(res_values, std::move(res_offsets), res_offset);
return create(res_values, std::move(res_offsets), res_offset);
}

void ColumnSparse::expand(const Filter & mask, bool inverted)
@ -671,7 +671,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)

ReadableSize(rss),
ReadableSize(difference));

total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas);
MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
}
}
@ -2,10 +2,13 @@

#include <memory>

#include <base/types.h>
#include <Common/Logger.h>
#include <Common/SharedMutex.h>
#include <Common/SharedLockGuard.h>
#include <Common/SharedMutex.h>

namespace DB
{

/** AtomicLogger allows to atomically change logger.
* Standard library does not have atomic_shared_ptr, and we do not use std::atomic* operations,

@ -49,3 +52,5 @@ private:

mutable DB::SharedMutex log_mutex;
LoggerPtr logger;
};

}
16 src/Common/CurrentThreadHelpers.cpp Normal file
@ -0,0 +1,16 @@

#include <Common/CurrentThread.h>
#include <Common/CurrentThreadHelpers.h>

namespace DB
{

bool currentThreadHasGroup()
{
return DB::CurrentThread::getGroup() != nullptr;
}

LogsLevel currentThreadLogsLevel()
{
return DB::CurrentThread::get().getClientLogsLevel();
}
}
9 src/Common/CurrentThreadHelpers.h Normal file
@ -0,0 +1,9 @@

#pragma once

#include <Core/LogsLevel.h>

namespace DB
{
bool currentThreadHasGroup();
LogsLevel currentThreadLogsLevel();
}
@ -1,13 +1,15 @@

#include "DateLUT.h"

#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
#include <Common/filesystemHelpers.h>

#include <Poco/DigestStream.h>
#include <Poco/Exception.h>
#include <Poco/SHA1Engine.h>
#include <Common/filesystemHelpers.h>

#include <filesystem>
#include <fstream>
#include <Interpreters/Context.h>

namespace

@ -140,6 +142,38 @@ std::string determineDefaultTimeZone()

}

const DateLUTImpl & DateLUT::instance()
{
const auto & date_lut = getInstance();

if (DB::CurrentThread::isInitialized())
{
std::string timezone_from_context;
const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();

if (query_context)
{
timezone_from_context = extractTimezoneFromContext(query_context);

if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}

/// On the server side, timezone is passed in query_context,
/// but on CH-client side we have no query context,
/// and each time we modify client's global context
const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
if (global_context)
{
timezone_from_context = extractTimezoneFromContext(global_context);

if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}
}
return serverTimezoneInstance();
}

DateLUT::DateLUT()
{
/// Initialize the pointer to the default DateLUTImpl.
@ -1,17 +1,23 @@

#pragma once

#include "DateLUTImpl.h"

#include <base/defines.h>
#include <base/types.h>

#include <boost/noncopyable.hpp>
#include "Common/CurrentThread.h"

#include <atomic>
#include <memory>
#include <mutex>
#include <unordered_map>

namespace DB
{
class Context;
using ContextPtr = std::shared_ptr<const Context>;
}

class DateLUTImpl;

/// This class provides lazy initialization and lookup of singleton DateLUTImpl objects for a given timezone.
class DateLUT : private boost::noncopyable

@ -20,38 +26,7 @@ public:

/// Return DateLUTImpl instance for session timezone.
/// session_timezone is a session-level setting.
/// If setting is not set, returns the server timezone.
static ALWAYS_INLINE const DateLUTImpl & instance()
{
const auto & date_lut = getInstance();

if (DB::CurrentThread::isInitialized())
{
std::string timezone_from_context;
const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();

if (query_context)
{
timezone_from_context = extractTimezoneFromContext(query_context);

if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}

/// On the server side, timezone is passed in query_context,
/// but on CH-client side we have no query context,
/// and each time we modify client's global context
const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
if (global_context)
{
timezone_from_context = extractTimezoneFromContext(global_context);

if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}

}
return serverTimezoneInstance();
}
static const DateLUTImpl & instance();

static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone)
{
@ -1,8 +1,5 @@

#include "DateLUTImpl.h"

#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>
#include <Core/DecimalFunctions.h>
#include <Common/DateLUTImpl.h>
#include <Common/Exception.h>

#include <algorithm>

@ -11,6 +8,10 @@

#include <cstring>
#include <memory>

#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>

namespace DB
{

@ -214,6 +215,29 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)

}
}

unsigned int DateLUTImpl::toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const
{
constexpr Int64 millisecond_multiplier = 1'000;
constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;

auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);

if (datetime.value < 0 && components.fractional)
{
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
--components.whole;
}
Int64 fractional = components.fractional;
if (scale_multiplier > microsecond_multiplier)
fractional = fractional / (scale_multiplier / microsecond_multiplier);
else if (scale_multiplier < microsecond_multiplier)
fractional = fractional * (microsecond_multiplier / scale_multiplier);

UInt16 millisecond = static_cast<UInt16>(fractional / divider);
return millisecond;
}
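
This out-of-line method backs millisecond extraction from `DateTime64` values; a hedged SQL-level check, assuming the `toMillisecond` function is exposed in this build:

```sql
SELECT toMillisecond(toDateTime64('2024-03-01 12:00:00.456', 3)); -- expected: 456
```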

/// Prefer to load timezones from blobs linked to the binary.
/// The blobs are provided by "tzdata" library.
@ -3,7 +3,6 @@
|
||||
#include <base/DayNum.h>
|
||||
#include <base/defines.h>
|
||||
#include <base/types.h>
|
||||
#include <Core/DecimalFunctions.h>
|
||||
|
||||
#include <ctime>
|
||||
#include <cassert>
|
||||
@ -50,6 +49,11 @@ enum class WeekDayMode
|
||||
WeekStartsSunday1 = 3
|
||||
};
|
||||
|
||||
namespace DB
|
||||
{
|
||||
class DateTime64;
|
||||
}
|
||||
|
||||
/** Lookup table for conversion of time to date, and to month / year / day of week / day of month and so on.
* It was first implemented for OLAPServer, which needed to do billions of such transformations.
*/
@ -593,29 +597,7 @@ public:
return time % 60;
}

template <typename DateOrTime>
unsigned toMillisecond(const DateOrTime & datetime, Int64 scale_multiplier) const
{
constexpr Int64 millisecond_multiplier = 1'000;
constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;

auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);

if (datetime.value < 0 && components.fractional)
{
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
--components.whole;
}
Int64 fractional = components.fractional;
if (scale_multiplier > microsecond_multiplier)
fractional = fractional / (scale_multiplier / microsecond_multiplier);
else if (scale_multiplier < microsecond_multiplier)
fractional = fractional * (microsecond_multiplier / scale_multiplier);

UInt16 millisecond = static_cast<UInt16>(fractional / divider);
return millisecond;
}
unsigned toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const;

unsigned toMinute(Time t) const
{

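The template body above leaves the header in favor of a declaration against the forward-declared DB::DateTime64. A single-file sketch of that pattern (names and the simplified arithmetic are illustrative only):

#include <cstdio>

// "Header" side: a forward declaration suffices to declare the method.
namespace DB { class DateTime64; }

struct LutSketch
{
    unsigned toMillisecond(const DB::DateTime64 & dt, long long scale_multiplier) const;
};

// "Source" side: the full type and the out-of-line definition.
namespace DB
{
class DateTime64
{
public:
    long long value = 1'250; // 1.250 s at scale 3
};
}

unsigned LutSketch::toMillisecond(const DB::DateTime64 & dt, long long scale_multiplier) const
{
    // Deliberately simplified: fractional seconds scaled to milliseconds.
    return static_cast<unsigned>((dt.value % scale_multiplier) * 1'000 / scale_multiplier);
}

int main()
{
    DB::DateTime64 dt;
    std::printf("%u\n", LutSketch{}.toMillisecond(dt, 1'000)); // 250
    return 0;
}

The presumable benefit is that DateLUTImpl.h no longer drags Core/DecimalFunctions.h into every translation unit that includes it.
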
@ -1,26 +1,27 @@
#include "Exception.h"

#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <base/demangle.h>
#include <Poco/String.h>
#include <Common/AtomicLogger.h>
#include <Common/ErrorCodes.h>
#include <Common/Exception.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <Common/MemorySanitizer.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/config_version.h>
#include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h>
#include <Common/logger_useful.h>

#include <Common/config_version.h>
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>

#include <Poco/String.h>

namespace fs = std::filesystem;

@ -1,22 +1,20 @@
#pragma once

#include <cerrno>
#include <exception>
#include <vector>
#include <memory>

#include <Poco/Exception.h>

#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/int8_to_string.h>
#include <base/scope_guard.h>
#include <Common/AtomicLogger.h>
#include <Common/Logger.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/StackTrace.h>

#include <cerrno>
#include <exception>
#include <memory>
#include <vector>

#include <fmt/format.h>
#include <Poco/Exception.h>


namespace Poco { class Logger; }
@ -24,6 +22,8 @@ namespace Poco { class Logger; }
namespace DB
{

class AtomicLogger;

[[noreturn]] void abortOnFailedAssertion(const String & description);

/// This flag can be set for testing purposes - to check that no exceptions are thrown.

@ -10,6 +10,8 @@
#include <IO/ReadHelpers.h>
#include <base/JSON.h>

#include <boost/range/adaptor/map.hpp>


namespace fs = std::filesystem;

@ -203,7 +203,7 @@ public:
if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until)
{
LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit);
mute_warning_until = roundUp(total_connections_in_group, limits.warning_step);
mute_warning_until = roundUp(total_connections_in_group, HTTPConnectionPools::Limits::warning_step);
}
}

@ -295,8 +295,13 @@ private:
String getTarget() const
{
if (!Session::getProxyConfig().host.empty())
return fmt::format("{} over proxy {}", Session::getHost(), Session::getProxyConfig().host);
return Session::getHost();
return fmt::format("{}:{} over proxy {}",
Session::getHost(),
Session::getPort(),
Session::getProxyConfig().host);
return fmt::format("{}:{}",
Session::getHost(),
Session::getPort());
}

void flushRequest() override
@ -472,7 +477,8 @@ public:
String getTarget() const
{
if (!proxy_configuration.isEmpty())
return fmt::format("{} over proxy {}", host, proxy_configuration.host);
return fmt::format("{} over proxy {}",
host, proxy_configuration.host);
return host;
}

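A minimal sketch of the new target strings produced by both getTarget() variants (the free function and all values are illustrative; assumes fmt is available, as elsewhere in the codebase):

#include <fmt/format.h>
#include <cstdio>
#include <string>

// Sketch: the port is now always part of the target, the proxy host only
// when one is configured.
static std::string makeTarget(const std::string & host, unsigned port, const std::string & proxy_host)
{
    if (!proxy_host.empty())
        return fmt::format("{}:{} over proxy {}", host, port, proxy_host);
    return fmt::format("{}:{}", host, port);
}

int main()
{
    std::printf("%s\n", makeTarget("example-host", 8123, "").c_str());              // example-host:8123
    std::printf("%s\n", makeTarget("example-host", 8123, "example-proxy").c_str()); // example-host:8123 over proxy example-proxy
    return 0;
}
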
@ -207,7 +207,7 @@ public:
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
{
DB::PrefetchingHelper prefetching;
size_t prefetch_look_ahead = prefetching.getInitialLookAheadValue();
size_t prefetch_look_ahead = DB::PrefetchingHelper::getInitialLookAheadValue();

size_t i = 0;
auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead);
@ -216,10 +216,10 @@ public:
{
if constexpr (prefetch)
{
if (i == prefetching.iterationsToMeasure())
if (i == DB::PrefetchingHelper::iterationsToMeasure())
{
prefetch_look_ahead = prefetching.calcPrefetchLookAhead();
prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - prefetching.getInitialLookAheadValue());
prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - DB::PrefetchingHelper::getInitialLookAheadValue());
}

if (prefetch_it != end)

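The calls above feed a software-prefetch loop. A generic standalone sketch of the same look-ahead idea, with a fixed distance where PrefetchingHelper tunes it at runtime (__builtin_prefetch is a GCC/Clang builtin):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<long> cells(1 << 16, 1);
    const std::size_t look_ahead = 16; // fixed here; the real helper measures and adjusts
    long sum = 0;
    for (std::size_t i = 0; i < cells.size(); ++i)
    {
        // Hint the cache a few elements ahead, as mergeToViaEmplace does
        // with its separate prefetch iterator.
        if (i + look_ahead < cells.size())
            __builtin_prefetch(&cells[i + look_ahead]);
        sum += cells[i];
    }
    std::printf("%ld\n", sum); // 65536
    return 0;
}
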
@ -2,6 +2,7 @@

#if USE_JEMALLOC

#include <Common/Exception.h>
#include <Common/Stopwatch.h>
#include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h>

@ -1,9 +1,10 @@
#pragma once

#include <cstring>
#include <string>
#include <exception>
#include <string>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>


/** Stores a calendar date in broken-down form (year, month, day-in-month).

@ -1,15 +1,20 @@
#pragma once

#include <memory>

#include <base/defines.h>

#include <Poco/Channel.h>
#include <memory>

#include <Poco/Logger.h>
#include <Poco/Message.h>

using LoggerPtr = Poco::LoggerPtr;
namespace Poco
{
class Channel;
class Logger;
using LoggerPtr = std::shared_ptr<Logger>;
}

using LoggerPtr = std::shared_ptr<Poco::Logger>;
using LoggerRawPtr = Poco::Logger *;

/** RAII wrappers around Poco/Logger.h.

@ -1,4 +1,5 @@
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>
#include <Common/LoggingFormatStringHelpers.h>
#include <Common/SipHash.h>
#include <Common/thread_local_rng.h>

@ -22,7 +22,7 @@ void protectMemoryRegion(void * addr, size_t len, int prot)
}
#endif

size_t byte_size(size_t num_elements, size_t element_size)
ALWAYS_INLINE size_t byte_size(size_t num_elements, size_t element_size)
{
size_t amount;
if (__builtin_mul_overflow(num_elements, element_size, &amount))
@ -30,7 +30,7 @@ size_t byte_size(size_t num_elements, size_t element_size)
return amount;
}

size_t minimum_memory_for_elements(size_t num_elements, size_t element_size, size_t pad_left, size_t pad_right)
ALWAYS_INLINE size_t minimum_memory_for_elements(size_t num_elements, size_t element_size, size_t pad_left, size_t pad_right)
{
size_t amount;
if (__builtin_add_overflow(byte_size(num_elements, element_size), pad_left + pad_right, &amount))

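Both helpers rely on the GCC/Clang overflow-checked builtins, which return true when the exact result does not fit in the destination. A minimal demonstration (values are illustrative):

#include <cstddef>
#include <cstdio>

int main()
{
    std::size_t amount = 0;
    if (__builtin_mul_overflow(std::size_t{1} << 40, std::size_t{1} << 30, &amount))
        std::printf("overflow detected\n");    // 2^70 does not fit in 64 bits
    if (!__builtin_mul_overflow(std::size_t{1024}, std::size_t{8}, &amount))
        std::printf("bytes = %zu\n", amount);  // bytes = 8192
    return 0;
}
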
@ -1,3 +1,4 @@
#include <Common/CurrentThread.h>
#include <Common/ProfileEventsScope.h>

namespace DB

@ -1,7 +1,8 @@
#pragma once

#include <Common/ProfileEvents.h>
#include <Common/CurrentThread.h>

#include <boost/noncopyable.hpp>

namespace DB
{

@ -1,15 +1,16 @@
#include "QueryProfiler.h"

#include <IO/WriteHelpers.h>
#include <Common/TraceSender.h>
#include <base/defines.h>
#include <base/errnoToString.h>
#include <base/phdr_cache.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/MemoryTracker.h>
#include <Common/StackTrace.h>
#include <Common/thread_local_rng.h>
#include <Common/TraceSender.h>
#include <Common/logger_useful.h>
#include <base/defines.h>
#include <base/phdr_cache.h>
#include <base/errnoToString.h>
#include <Common/thread_local_rng.h>

#include <random>

@ -366,7 +366,7 @@ String demangleAndCollapseNames(std::optional<std::string_view> file, const char
if (file.has_value())
{
std::string_view file_copy = file.value();
if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != file_copy.npos)
if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != std::string_view::npos)
file_copy.remove_suffix(file_copy.size() - trim_pos);
if (file_copy.ends_with("functional"))
return "?";

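A standalone sketch of the trimming this hunk touches: cut the path at the last '/' and test the remaining directory component (the path value is illustrative):

#include <cstdio>
#include <string_view>

int main()
{
    std::string_view file_copy = "c++/v1/functional/function.h";
    if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != std::string_view::npos)
        file_copy.remove_suffix(file_copy.size() - trim_pos); // "c++/v1/functional"
    if (file_copy.ends_with("functional"))
        std::printf("frame collapsed to \"?\"\n");
    return 0;
}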