mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)

Merge branch 'ClickHouse:master' into 56257_parse_crlf_with_TSV_files

commit 971263c4e6

@ -96,7 +96,6 @@ Checks: [
    '-modernize-use-default-member-init',
    '-modernize-use-emplace',
    '-modernize-use-nodiscard',
    '-modernize-use-override',
    '-modernize-use-trailing-return-type',

    '-performance-inefficient-string-concatenation',

@ -13,8 +13,6 @@
#include <tuple>
#include <limits>

-#include <boost/math/special_functions/fpclassify.hpp>
-
// NOLINTBEGIN(*)

/// Use same extended double for all platforms
@ -22,6 +20,7 @@
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
+#include <boost/math/special_functions/fpclassify.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
@ -309,6 +308,13 @@ struct integer<Bits, Signed>::_impl
        constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
        static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
        /// Implementation-specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
#if (LDBL_MANT_DIG == 64)
        if (!std::isfinite(t))
        {
            self = 0;
            return;
        }
#else
        if constexpr (std::is_same_v<T, double>)
        {
            if (!std::isfinite(t))
@ -325,6 +331,7 @@ struct integer<Bits, Signed>::_impl
                return;
            }
        }
#endif

        const T alpha = t / static_cast<T>(max_int);

2  contrib/NuRaft  vendored
@ -1 +1 @@
-Subproject commit 08ac76ea80a37f89b12109c805eafe9f1dc9b991
+Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0

@ -51,6 +51,12 @@ else()
    target_compile_definitions(_nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1)
endif()

+target_link_libraries (_nuraft PRIVATE clickhouse_common_io)
+# We must have it PUBLIC here because some headers which depend on it are directly
+# included in clickhouse
+target_compile_definitions(_nuraft PUBLIC USE_CLICKHOUSE_THREADS=1)
+MESSAGE(STATUS "Will use clickhouse threads for NuRaft")
+
target_include_directories (_nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft")
# For some reason "asio.h" is included directly, without the "boost/" prefix.
target_include_directories (_nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")

@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

@ -16,6 +16,8 @@ ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

dpkg -i package_folder/clickhouse-common-static_*.deb
dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
+dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
+dpkg -i package_folder/clickhouse-library-bridge_*.deb
dpkg -i package_folder/clickhouse-server_*.deb
dpkg -i package_folder/clickhouse-client_*.deb

29  docs/changelogs/v24.3.2.23-lts.md  Normal file
@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.3.2.23-lts (8b7d910960c) FIXME as compared to v24.3.1.2672-lts (2c5c589a882)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix logical error in group_by_use_nulls + grouping set + analyzer + materialize/constant [#61567](https://github.com/ClickHouse/ClickHouse/pull/61567) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix external table cannot parse data type Bool [#62115](https://github.com/ClickHouse/ClickHouse/pull/62115) ([Duc Canh Le](https://github.com/canhld94)).
* Revert "Merge pull request [#61564](https://github.com/ClickHouse/ClickHouse/issues/61564) from liuneng1994/optimize_in_single_value" [#62135](https://github.com/ClickHouse/ClickHouse/pull/62135) ([Raúl Marín](https://github.com/Algunenano)).

#### CI Fix or Improvement (changelog entry is not required)

* Backported in [#62030](https://github.com/ClickHouse/ClickHouse/issues/62030):. [#61869](https://github.com/ClickHouse/ClickHouse/pull/61869) ([Nikita Fomichev](https://github.com/fm4v)).
* Backported in [#62057](https://github.com/ClickHouse/ClickHouse/issues/62057): ... [#62044](https://github.com/ClickHouse/ClickHouse/pull/62044) ([Max K.](https://github.com/maxknv)).
* Backported in [#62204](https://github.com/ClickHouse/ClickHouse/issues/62204):. [#62190](https://github.com/ClickHouse/ClickHouse/pull/62190) ([Konstantin Bogdanov](https://github.com/thevar1able)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix some crashes with analyzer and group_by_use_nulls. [#61933](https://github.com/ClickHouse/ClickHouse/pull/61933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix scalars create as select [#61998](https://github.com/ClickHouse/ClickHouse/pull/61998) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Ignore IfChainToMultiIfPass if returned type changed. [#62059](https://github.com/ClickHouse/ClickHouse/pull/62059) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix type for ConvertInToEqualPass [#62066](https://github.com/ClickHouse/ClickHouse/pull/62066) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Revert output Pretty in tty [#62090](https://github.com/ClickHouse/ClickHouse/pull/62090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

@ -68,6 +68,12 @@ In the results of `SELECT` query, the values of `AggregateFunction` type have im

## Example of an Aggregated Materialized View {#example-of-an-aggregated-materialized-view}

The following examples assume that you have a database named `test`, so create it if it doesn't already exist:

```sql
CREATE DATABASE test;
```

We will create the table `test.visits` that contains the raw data:

``` sql
@ -80,17 +86,24 @@ CREATE TABLE test.visits
) ENGINE = MergeTree ORDER BY (StartDate, CounterID);
```

+Next, we need to create an `AggregatingMergeTree` table that will store `AggregateFunction`s that keep track of the total number of visits and the number of unique users.
-`AggregatingMergeTree` materialized view that watches the `test.visits` table, and use the `AggregateFunction` type:

``` sql
-CREATE MATERIALIZED VIEW test.mv_visits
-(
+CREATE TABLE test.agg_visits (
    StartDate DateTime64 NOT NULL,
    CounterID UInt64,
    Visits AggregateFunction(sum, Nullable(Int32)),
    Users AggregateFunction(uniq, Nullable(Int32))
)
-ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID)
+ENGINE = AggregatingMergeTree() ORDER BY (StartDate, CounterID);
```

And then let's create a materialized view that populates `test.agg_visits` from `test.visits`:

```sql
CREATE MATERIALIZED VIEW test.visits_mv TO test.agg_visits
AS SELECT
    StartDate,
    CounterID,
@ -104,25 +117,45 @@ Inserting data into the `test.visits` table.

``` sql
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
-    VALUES (1667446031, 1, 3, 4)
-INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
-    VALUES (1667446031, 1, 6, 3)
+    VALUES (1667446031000, 1, 3, 4), (1667446031000, 1, 6, 3);
```

-The data is inserted in both the table and the materialized view `test.mv_visits`.
+The data is inserted in both `test.visits` and `test.agg_visits`.

To get the aggregated data, we need to execute a query such as `SELECT ... GROUP BY ...` from `test.agg_visits`:

-``` sql
+```sql
SELECT
    StartDate,
    sumMerge(Visits) AS Visits,
    uniqMerge(Users) AS Users
-FROM test.mv_visits
+FROM test.agg_visits
GROUP BY StartDate
ORDER BY StartDate;
```

```text
┌───────────────StartDate─┬─Visits─┬─Users─┐
│ 2022-11-03 03:27:11.000 │      9 │     2 │
└─────────────────────────┴────────┴───────┘
```

Now let's add another couple of records to `test.visits`, but this time we'll use a different timestamp for one of them:

```sql
INSERT INTO test.visits (StartDate, CounterID, Sign, UserID)
    VALUES (1669446031000, 2, 5, 10), (1667446031000, 3, 7, 5);
```

If we then run the `SELECT` query again, we'll see the following output:

```text
┌───────────────StartDate─┬─Visits─┬─Users─┐
│ 2022-11-03 03:27:11.000 │     16 │     3 │
│ 2022-11-26 07:00:31.000 │      5 │     1 │
└─────────────────────────┴────────┴───────┘
```
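
Because `test.agg_visits` stores intermediate aggregation states rather than final numbers, reading the state columns directly is not meaningful. As a quick sketch (not part of the upstream page), the built-in `finalizeAggregation` function turns a state into its current value, which can be handy for debugging:

```sql
SELECT StartDate, finalizeAggregation(Visits) AS visits_partial
FROM test.agg_visits;
```

Note that this yields per-row partial values; for correct totals, use `sumMerge`/`uniqMerge` with `GROUP BY` as shown above.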

## Related Content

- Blog: [Using Aggregate Combinators in ClickHouse](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states)

@ -18,6 +18,9 @@ Run the command:

```bash
wget https://s3.amazonaws.com/menusdata.nypl.org/gzips/2021_08_01_07_01_17_data.tgz
# Option: Validate the checksum
md5sum 2021_08_01_07_01_17_data.tgz
# Checksum should be equal to: db6126724de939a5481e3160a2d67d15
```

If needed, replace the link with an up-to-date one from http://menus.nypl.org/data.

@ -79,7 +79,7 @@ The supported formats are:
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
-| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
+| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✗ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
@ -1488,7 +1488,7 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000

- [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater, it will be cut. Default value - `10000`.
- [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
- [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
-- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
+- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`.

## RowBinary {#rowbinary}

@ -2466,7 +2466,7 @@ Result:

## Npy {#data-format-npy}

This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats the top-level dimension as an array of rows with a single column. Supported Npy data types and their corresponding types in ClickHouse:

| Npy type | ClickHouse type |
|:--------:|:---------------:|
| b1       | UInt8           |

@ -507,16 +507,18 @@ Example:

``` xml
<http_handlers>
    <rule>
-        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
        <methods>GET</methods>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
        </headers>
        <handler>
            <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
        </handler>
    </rule>
    <defaults/>
@ -524,13 +526,13 @@ Example:
```

``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
```

:::note
-In one `predefined_query_handler` only supports one `query` of an insert type.
+In one `predefined_query_handler`, only one `query` is supported.
:::
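
The query embedded in the handler can be tested directly in `clickhouse-client` before wiring it into the configuration (an illustrative sketch, not part of the original page):

```sql
SELECT name, value FROM system.settings
WHERE name IN ('max_threads', 'max_final_threads');
```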

### dynamic_query_handler {#dynamic_query_handler}

@ -436,7 +436,7 @@ Default: 0

Restriction on dropping partitions.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
Default value: 50 GB.
The value 0 means that you can drop partitions without any restrictions.
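
As a sketch (the table name below is hypothetical), this is the kind of statement the limit guards; it fails with an exception while the partition's size exceeds `max_partition_size_to_drop`:

```sql
-- hypothetical table and partition; rejected while the partition exceeds max_partition_size_to_drop
ALTER TABLE db.events DROP PARTITION '2024-03-01';
```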

@ -518,7 +518,7 @@ Restriction on deleting tables.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.
The value 0 means that you can delete all tables without any restrictions.
@ -1570,7 +1570,7 @@ Restriction on deleting tables.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.

@ -1588,7 +1588,7 @@ Restriction on dropping partitions.

If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

Default value: 50 GB.

@ -1648,7 +1648,7 @@ Possible values:

- 0 — Output without row numbers.
- 1 — Output with row numbers.

-Default value: `0`.
+Default value: `1`.

**Example**

@ -36,7 +36,7 @@ E.g. configuration option
    <s3>
        <type>s3</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3>
```

@ -47,7 +47,7 @@ is equal to configuration (from `24.1`):
        <object_storage_type>s3</object_storage_type>
        <metadata_type>local</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3>
```

@ -56,7 +56,7 @@ Configuration
    <s3_plain>
        <type>s3_plain</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```

@ -67,7 +67,7 @@ is equal to
        <object_storage_type>s3</object_storage_type>
        <metadata_type>plain</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```

@ -79,7 +79,7 @@ Example of full storage configuration will look like:
    <s3>
        <type>s3</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3>
</disks>
<policies>
@ -105,7 +105,7 @@ Starting with 24.1 clickhouse version, it can also look like:
        <object_storage_type>s3</object_storage_type>
        <metadata_type>local</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3>
</disks>
<policies>
@ -324,7 +324,7 @@ Configuration:
    <s3_plain>
        <type>s3_plain</type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```

@ -337,7 +337,7 @@ Configuration:
        <object_storage_type>azure</object_storage_type>
        <metadata_type>plain</metadata_type>
        <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-        <use_invironment_credentials>1</use_invironment_credentials>
+        <use_environment_credentials>1</use_environment_credentials>
    </s3_plain>
```

@ -7,26 +7,33 @@ sidebar_position: 351

[Cramer's V](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V) (sometimes referred to as Cramer's phi) is a measure of association between two columns in a table. The result of the `cramersV` function ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. It may be viewed as the association between two variables as a percentage of their maximum possible variation.

:::note
For a bias-corrected version of Cramer's V see: [cramersVBiasCorrected](./cramersvbiascorrected.md)
:::

**Syntax**

``` sql
cramersV(column1, column2)
```

-**Arguments**
+**Parameters**

-- `column1` and `column2` are the columns to be compared
+- `column1`: first column to be compared.
+- `column2`: second column to be compared.

**Returned value**

- a value between 0 (corresponding to no association between the columns' values) and 1 (complete association).

-**Return type** is always [Float64](../../../sql-reference/data-types/float.md).
+Type: always [Float64](../../../sql-reference/data-types/float.md).

**Example**

The two columns compared below have no association with each other, so the result of `cramersV` is 0:

Query:

``` sql
SELECT
    cramersV(a, b)

@ -5,31 +5,31 @@ sidebar_position: 352

# cramersVBiasCorrected

Cramer's V is a measure of association between two columns in a table. The result of the [`cramersV` function](./cramersv.md) ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. The function can be heavily biased, so this version of Cramer's V uses the [bias correction](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V#Bias_correction).

**Syntax**

``` sql
cramersVBiasCorrected(column1, column2)
```

-**Arguments**
+**Parameters**

-- `column1` and `column2` are the columns to be compared
+- `column1`: first column to be compared.
+- `column2`: second column to be compared.

**Returned value**

- a value between 0 (corresponding to no association between the columns' values) and 1 (complete association).

-**Return type** is always [Float64](../../../sql-reference/data-types/float.md).
+Type: always [Float64](../../../sql-reference/data-types/float.md).

**Example**

The two columns compared below have a small association with each other. Notice the result of `cramersVBiasCorrected` is smaller than the result of `cramersV`:

Query:

``` sql
SELECT
    cramersV(a, b),

@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t

The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn’t explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter.

-ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.
+ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.

-When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting.
+When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting.
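
For example (a sketch, not part of the original page), the `parseDateTimeBestEffort` function provides the same kind of lenient parsing explicitly, without touching the `date_time_input_format` setting:

```sql
SELECT parseDateTimeBestEffort('2022/11/03 03:27:11') AS dt;
```
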
## Examples

@ -147,8 +147,8 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
- [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)

@ -190,22 +190,67 @@ SELECT toTypeName(variantType(v)) FROM test LIMIT 1;
└─────────────────────────────────────────────────────────────────────┘
```

-## Conversion between Variant column and other columns
+## Conversion between a Variant column and other columns

-There are 3 possible conversions that can be performed with Variant column.
+There are 4 possible conversions that can be performed with a column of type `Variant`.

-### Converting an ordinary column to a Variant column
+### Converting a String column to a Variant column

-It is possible to convert ordinary column with type `T` to a `Variant` column containing this type:
+Conversion from `String` to `Variant` is performed by parsing a value of `Variant` type from the string value:

```sql
-SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
+SELECT '42'::Variant(String, UInt64) as variant, variantType(variant) as variant_type
```

```text
-┌─type_name──────────────────────────────┬─variant───────┐
-│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
-└────────────────────────────────────────┴───────────────┘
+┌─variant─┬─variant_type─┐
+│ 42      │ UInt64       │
+└─────────┴──────────────┘
```

```sql
SELECT '[1, 2, 3]'::Variant(String, Array(UInt64)) as variant, variantType(variant) as variant_type
```

```text
┌─variant─┬─variant_type──┐
│ [1,2,3] │ Array(UInt64) │
└─────────┴───────────────┘
```

```sql
SELECT CAST(map('key1', '42', 'key2', 'true', 'key3', '2020-01-01'), 'Map(String, Variant(UInt64, Bool, Date))') as map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) as map_of_variant_types
```

```text
┌─map_of_variants─────────────────────────────┬─map_of_variant_types──────────────────────────┐
│ {'key1':42,'key2':true,'key3':'2020-01-01'} │ {'key1':'UInt64','key2':'Bool','key3':'Date'} │
└─────────────────────────────────────────────┴───────────────────────────────────────────────┘
```

### Converting an ordinary column to a Variant column

It is possible to convert an ordinary column with type `T` to a `Variant` column containing this type:

```sql
SELECT toTypeName(variant) as type_name, [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) as variant, variantType(variant) as variant_name
```

```text
┌─type_name──────────────────────────────┬─variant─┬─variant_name──┐
│ Variant(Array(UInt64), String, UInt64) │ [1,2,3] │ Array(UInt64) │
└────────────────────────────────────────┴─────────┴───────────────┘
```

Note: conversion from the `String` type is always performed through parsing. If you need to convert a `String` column to the `String` variant of a `Variant` without parsing, you can do the following:

```sql
SELECT '[1, 2, 3]'::Variant(String)::Variant(String, Array(UInt64), UInt64) as variant, variantType(variant) as variant_type
```

```text
┌─variant───┬─variant_type─┐
│ [1, 2, 3] │ String       │
└───────────┴──────────────┘
```

### Converting a Variant column to an ordinary column
@ -395,3 +440,37 @@ SELECT v, variantType(v) FROM test ORDER by v;
│ 100 │ UInt32         │
└─────┴────────────────┘
```

## JSONExtract functions with Variant

All `JSONExtract*` functions support the `Variant` type:

```sql
SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Variant(UInt32, String, Array(UInt32))') AS variant, variantType(variant) AS variant_type;
```

```text
┌─variant─┬─variant_type──┐
│ [1,2,3] │ Array(UInt32) │
└─────────┴───────────────┘
```

```sql
SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) AS map_of_variant_types
```

```text
┌─map_of_variants──────────────────┬─map_of_variant_types────────────────────────────┐
│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
└──────────────────────────────────┴─────────────────────────────────────────────────┘
```

```sql
SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS variants, arrayMap(x -> (x.1, variantType(x.2)), variants) AS variant_types
```

```text
┌─variants───────────────────────────────┬─variant_types─────────────────────────────────────────┐
│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
```
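
A single variant can also be extracted as a column of its own type via `variantElement` (a sketch; treat the exact call as an assumption):

```sql
SELECT v, variantElement(v, 'UInt64') AS num
FROM (SELECT '42'::Variant(String, UInt64) AS v);
```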
@ -8,7 +8,7 @@ sidebar_label: VIEW

You can modify the `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting the ingestion process.

-This command is created to change materialized view created with `TO [db.]name` clause. It does not change the structure of the underling storage table and it does not change the columns' definition of the materialized view, because of this the application of this command is very limited for materialized views are created without `TO [db.]name` clause.
+This command is created to change a materialized view created with the `TO [db.]name` clause. It does not change the structure of the underlying storage table, and it does not change the columns' definition of the materialized view; because of this, the application of this command is very limited for materialized views created without the `TO [db.]name` clause.

**Example with TO table**

@ -12,25 +12,23 @@ Some of the calculations that you can do are similar to those that can be done w

ClickHouse supports the standard grammar for defining windows and window functions. The table below indicates whether a feature is currently supported.

-| Feature                                                                            | Support or workaround |
+| Feature                                                                            | Supported? |
|------------------------------------------------------------------------------------|------------|
-| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | supported |
-| expressions involving window functions, e.g. `(count(*) over ()) / 2)` | supported |
-| `WINDOW` clause (`select ... from table window w as (partition by id)`) | supported |
-| `ROWS` frame | supported |
-| `RANGE` frame | supported, the default |
-| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | not supported, specify the number of seconds instead (`RANGE` works with any numeric type). |
-| `GROUPS` frame | not supported |
-| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | all aggregate functions are supported |
-| `rank()`, `dense_rank()`, `row_number()` | supported |
-| `lag/lead(value, offset)` | Not supported. Workarounds: |
-| | 1) replace with `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` |
-| | 2) use `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
-| ntile(buckets) | Supported. Specify window like, (partition by x order by y rows between unbounded preceding and unrounded following). |
+| ad hoc window specification (`count(*) over (partition by id order by time desc)`) | ✅ |
+| expressions involving window functions, e.g. `(count(*) over ()) / 2` | ✅ |
+| `WINDOW` clause (`select ... from table window w as (partition by id)`) | ✅ |
+| `ROWS` frame | ✅ |
+| `RANGE` frame | ✅ (the default) |
+| `INTERVAL` syntax for `DateTime` `RANGE OFFSET` frame | ❌ (specify the number of seconds instead; `RANGE` works with any numeric type) |
+| `GROUPS` frame | ❌ |
+| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (all aggregate functions are supported) |
+| `rank()`, `dense_rank()`, `row_number()` | ✅ |
+| `lag/lead(value, offset)` | ❌ <br/> You can use one of the following workarounds:<br/> 1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` <br/> 2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
+| `ntile(buckets)` | ✅ <br/> Specify the window as `(partition by x order by y rows between unbounded preceding and unbounded following)`. |

## ClickHouse-specific Window Functions

-There are also the following window function that's specific to ClickHouse:
+There is also the following ClickHouse-specific window function:

### nonNegativeDerivative(metric_column, timestamp_column[, INTERVAL X UNITS])

@ -89,6 +87,102 @@ These functions can be used only as a window function.

Let's have a look at some examples of how window functions can be used.

### Numbering rows

```sql
CREATE TABLE salaries
(
    `team` String,
    `player` String,
    `salary` UInt32,
    `position` String
)
Engine = Memory;

INSERT INTO salaries FORMAT Values
    ('Port Elizabeth Barbarians', 'Gary Chen', 195000, 'F'),
    ('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
    ('Port Elizabeth Barbarians', 'Michael Stanley', 150000, 'D'),
    ('New Coreystad Archdukes', 'Scott Harrison', 150000, 'D'),
    ('Port Elizabeth Barbarians', 'Robert George', 195000, 'M');
```

```sql
SELECT player, salary,
       row_number() OVER (ORDER BY salary) AS row
FROM salaries;
```

```text
┌─player──────────┬─salary─┬─row─┐
│ Michael Stanley │ 150000 │   1 │
│ Scott Harrison  │ 150000 │   2 │
│ Charles Juarez  │ 190000 │   3 │
│ Gary Chen       │ 195000 │   4 │
│ Robert George   │ 195000 │   5 │
└─────────────────┴────────┴─────┘
```

```sql
SELECT player, salary,
       row_number() OVER (ORDER BY salary) AS row,
       rank() OVER (ORDER BY salary) AS rank,
       dense_rank() OVER (ORDER BY salary) AS denseRank
FROM salaries;
```

```text
┌─player──────────┬─salary─┬─row─┬─rank─┬─denseRank─┐
│ Michael Stanley │ 150000 │   1 │    1 │         1 │
│ Scott Harrison  │ 150000 │   2 │    1 │         1 │
│ Charles Juarez  │ 190000 │   3 │    3 │         2 │
│ Gary Chen       │ 195000 │   4 │    4 │         3 │
│ Robert George   │ 195000 │   5 │    4 │         3 │
└─────────────────┴────────┴─────┴──────┴───────────┘
```
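
For completeness, here is a sketch (not in the original examples) of `ntile` from the support table above bucketing the same rows; note the explicit frame it requires:

```sql
SELECT player, salary,
       ntile(2) OVER (ORDER BY salary ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS bucket
FROM salaries;
```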

### Aggregation functions

Compare each player's salary to the average for their team.

```sql
SELECT player, salary, team,
       avg(salary) OVER (PARTITION BY team) AS teamAvg,
       salary - teamAvg AS diff
FROM salaries;
```

```text
┌─player──────────┬─salary─┬─team──────────────────────┬─teamAvg─┬───diff─┐
│ Charles Juarez  │ 190000 │ New Coreystad Archdukes   │  170000 │  20000 │
│ Scott Harrison  │ 150000 │ New Coreystad Archdukes   │  170000 │ -20000 │
│ Gary Chen       │ 195000 │ Port Elizabeth Barbarians │  180000 │  15000 │
│ Michael Stanley │ 150000 │ Port Elizabeth Barbarians │  180000 │ -30000 │
│ Robert George   │ 195000 │ Port Elizabeth Barbarians │  180000 │  15000 │
└─────────────────┴────────┴───────────────────────────┴─────────┴────────┘
```

Compare each player's salary to the maximum for their team.

```sql
SELECT player, salary, team,
       max(salary) OVER (PARTITION BY team) AS teamMax,
       salary - teamMax AS diff
FROM salaries;
```

```text
┌─player──────────┬─salary─┬─team──────────────────────┬─teamMax─┬───diff─┐
│ Charles Juarez  │ 190000 │ New Coreystad Archdukes   │  190000 │      0 │
│ Scott Harrison  │ 150000 │ New Coreystad Archdukes   │  190000 │ -40000 │
│ Gary Chen       │ 195000 │ Port Elizabeth Barbarians │  195000 │      0 │
│ Michael Stanley │ 150000 │ Port Elizabeth Barbarians │  195000 │ -45000 │
│ Robert George   │ 195000 │ Port Elizabeth Barbarians │  195000 │      0 │
└─────────────────┴────────┴───────────────────────────┴─────────┴────────┘
```
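
The same pattern works with any aggregate function. For instance (a sketch, not part of the original examples), `sum` gives each player's share of the team payroll:

```sql
SELECT player, salary, team,
       sum(salary) OVER (PARTITION BY team) AS teamTotal,
       round(100 * salary / teamTotal, 1) AS pct
FROM salaries;
```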

### Partitioning by column

```sql
CREATE TABLE wf_partition
(
@ -120,6 +214,8 @@ ORDER BY
└──────────┴───────┴───────┴──────────────┘
```

### Frame bounding

```sql
CREATE TABLE wf_frame
(
@ -131,14 +227,19 @@ ENGINE = Memory;

INSERT INTO wf_frame FORMAT Values
    (1,1,1), (1,2,2), (1,3,3), (1,4,4), (1,5,5);
```

--- frame is bounded by bounds of a partition (BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
```sql
+-- Frame is bounded by bounds of a partition (BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
SELECT
    part_key,
    value,
    order,
-    groupArray(value) OVER (PARTITION BY part_key ORDER BY order ASC
-        Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS frame_values
+    groupArray(value) OVER (
+        PARTITION BY part_key
+        ORDER BY order ASC
+        Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+    ) AS frame_values
FROM wf_frame
ORDER BY
    part_key ASC,
@ -151,7 +252,9 @@ ORDER BY
│        1 │     4 │     4 │ [1,2,3,4,5]  │
│        1 │     5 │     5 │ [1,2,3,4,5]  │
└──────────┴───────┴───────┴──────────────┘
```

```sql
-- short form - no bound expression, no order by
SELECT
    part_key,
@ -169,14 +272,19 @@ ORDER BY
│        1 │     4 │     4 │ [1,2,3,4,5]  │
│        1 │     5 │     5 │ [1,2,3,4,5]  │
└──────────┴───────┴───────┴──────────────┘
```

--- frame is bounded by the beggining of a partition and the current row
```sql
+-- frame is bounded by the beginning of a partition and the current row
SELECT
    part_key,
    value,
    order,
-    groupArray(value) OVER (PARTITION BY part_key ORDER BY order ASC
-        Rows BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS frame_values
+    groupArray(value) OVER (
+        PARTITION BY part_key
+        ORDER BY order ASC
+        Rows BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
+    ) AS frame_values
FROM wf_frame
ORDER BY
    part_key ASC,
@ -189,8 +297,10 @@ ORDER BY
│        1 │     4 │     4 │ [1,2,3,4]    │
│        1 │     5 │     5 │ [1,2,3,4,5]  │
└──────────┴───────┴───────┴──────────────┘
```

--- short form (frame is bounded by the beggining of a partition and the current row)
```sql
+-- short form (frame is bounded by the beginning of a partition and the current row)
SELECT
    part_key,
    value,
@ -207,8 +317,10 @@ ORDER BY
│        1 │     4 │     4 │ [1,2,3,4]    │
│        1 │     5 │     5 │ [1,2,3,4,5]  │
└──────────┴───────┴───────┴──────────────┘
```

--- frame is bounded by the beggining of a partition and the current row, but order is backward
```sql
+-- frame is bounded by the beginning of a partition and the current row, but order is backward
SELECT
    part_key,
    value,
@ -225,14 +337,19 @@ ORDER BY
│        1 │     4 │     4 │ [5,4]        │
│        1 │     5 │     5 │ [5]          │
└──────────┴───────┴───────┴──────────────┘
```

```sql
-- sliding frame - 1 PRECEDING ROW AND CURRENT ROW
SELECT
    part_key,
    value,
    order,
-    groupArray(value) OVER (PARTITION BY part_key ORDER BY order ASC
-        Rows BETWEEN 1 PRECEDING AND CURRENT ROW) AS frame_values
+    groupArray(value) OVER (
+        PARTITION BY part_key
+        ORDER BY order ASC
+        Rows BETWEEN 1 PRECEDING AND CURRENT ROW
+    ) AS frame_values
FROM wf_frame
ORDER BY
    part_key ASC,
@ -245,14 +362,19 @@ ORDER BY
│        1 │     4 │     4 │ [3,4]        │
│        1 │     5 │     5 │ [4,5]        │
└──────────┴───────┴───────┴──────────────┘
```

```sql
-- sliding frame - Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING
SELECT
    part_key,
    value,
    order,
-    groupArray(value) OVER (PARTITION BY part_key ORDER BY order ASC
-        Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS frame_values
+    groupArray(value) OVER (
+        PARTITION BY part_key
+        ORDER BY order ASC
+        Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING
+    ) AS frame_values
FROM wf_frame
ORDER BY
    part_key ASC,
@ -264,7 +386,9 @@ ORDER BY
│        1 │     4 │     4 │ [3,4,5]      │
│        1 │     5 │     5 │ [4,5]        │
└──────────┴───────┴───────┴──────────────┘
```
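
The support table above suggests `lagInFrame`/`leadInFrame` as the `lag`/`lead` workaround; here is a sketch of it on the same `wf_frame` table (not in the original examples):

```sql
-- emulate lag(value, 1): positions before the frame yield the column's default value (0)
SELECT
    part_key,
    value,
    order,
    lagInFrame(value, 1) OVER (
        PARTITION BY part_key
        ORDER BY order ASC
        Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS prev_value
FROM wf_frame
ORDER BY
    part_key ASC,
    value ASC;
```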

```sql
-- row_number does not respect the frame, so rn_1 = rn_2 = rn_3 != rn_4
SELECT
    part_key,
@ -278,8 +402,11 @@ SELECT
FROM wf_frame
WINDOW
    w1 AS (PARTITION BY part_key ORDER BY order DESC),
-    w2 AS (PARTITION BY part_key ORDER BY order DESC
-        Rows BETWEEN 1 PRECEDING AND CURRENT ROW)
+    w2 AS (
+        PARTITION BY part_key
+        ORDER BY order DESC
+        Rows BETWEEN 1 PRECEDING AND CURRENT ROW
+    )
ORDER BY
    part_key ASC,
    value ASC;
@ -290,7 +417,9 @@ ORDER BY
│        1 │     4 │     4 │ [5,4]        │    2 │    2 │    2 │    2 │
│        1 │     5 │     5 │ [5]          │    1 │    1 │    1 │    1 │
└──────────┴───────┴───────┴──────────────┴──────┴──────┴──────┴──────┘
```

```sql
-- first_value and last_value respect the frame
SELECT
    groupArray(value) OVER w1 AS frame_values_1,
@ -313,7 +442,9 @@ ORDER BY
│ [1,2,3,4]      │             1 │            4 │ [3,4]          │             3 │            4 │
│ [1,2,3,4,5]    │             1 │            5 │ [4,5]          │             4 │            5 │
└────────────────┴───────────────┴──────────────┴────────────────┴───────────────┴──────────────┘
```

```sql
-- second value within the frame
SELECT
    groupArray(value) OVER w1 AS frame_values_1,
@ -330,7 +461,9 @@ ORDER BY
│ [1,2,3,4]      │            2 │
│ [2,3,4,5]      │            3 │
└────────────────┴──────────────┘
```

```sql
-- second value within the frame + Null for missing values
SELECT
    groupArray(value) OVER w1 AS frame_values_1,
@ -351,7 +484,9 @@ ORDER BY

## Real world examples

-### Maximum/total salary per department.
+The following examples solve common real-world problems.
+
+### Maximum/total salary per department

```sql
CREATE TABLE employees
@ -369,7 +504,9 @@ INSERT INTO employees FORMAT Values
    ('IT', 'Tim', 200),
    ('IT', 'Anna', 300),
    ('IT', 'Elen', 500);
```

```sql
SELECT
    department,
    employee_name AS emp,
@ -386,8 +523,10 @@ FROM
        max(salary) OVER wndw AS max_salary_per_dep,
        sum(salary) OVER wndw AS total_salary_per_dep
    FROM employees
-    WINDOW wndw AS (PARTITION BY department
-        rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
+    WINDOW wndw AS (
+        PARTITION BY department
+        rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
+    )
    ORDER BY
        department ASC,
        employee_name ASC
@ -403,7 +542,7 @@ FROM
└────────────┴──────┴────────┴────────────────────┴──────────────────────┴──────────────────┘
```

-### Cumulative sum.
+### Cumulative sum

```sql
CREATE TABLE warehouse
@ -421,7 +560,9 @@ INSERT INTO warehouse VALUES
    ('sku1', '2020-01-01', 1),
    ('sku1', '2020-02-01', 1),
    ('sku1', '2020-03-01', 1);
```

```sql
SELECT
    item,
    ts,
@ -461,13 +602,18 @@ insert into sensors values('cpu_temp', '2020-01-01 00:00:00', 87),
    ('cpu_temp', '2020-01-01 00:00:05', 87),
    ('cpu_temp', '2020-01-01 00:00:06', 87),
    ('cpu_temp', '2020-01-01 00:00:07', 87);
```

```sql
SELECT
    metric,
    ts,
    value,
-    avg(value) OVER
-        (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN 2 PRECEDING AND CURRENT ROW)
-        AS moving_avg_temp
+    avg(value) OVER (
+        PARTITION BY metric
+        ORDER BY ts ASC
+        Rows BETWEEN 2 PRECEDING AND CURRENT ROW
+    ) AS moving_avg_temp
FROM sensors
ORDER BY
    metric ASC,
@ -536,7 +682,9 @@ insert into sensors values('ambient_temp', '2020-01-01 00:00:00', 16),
    ('ambient_temp', '2020-03-01 12:00:00', 16),
    ('ambient_temp', '2020-03-01 12:00:00', 16),
    ('ambient_temp', '2020-03-01 12:00:00', 16);
```

```sql
SELECT
    metric,
    ts,

@ -434,16 +434,18 @@ $ curl -v 'http://localhost:8123/predefined_query'
``` xml
<http_handlers>
    <rule>
-        <url><![CDATA[regex:/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
        <methods>GET</methods>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
        </headers>
        <handler>
            <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
        </handler>
    </rule>
    <defaults/>
@ -451,13 +453,13 @@ $ curl -v 'http://localhost:8123/predefined_query'
```

``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
```

:::note Warning
-In one `predefined_query_handler`, only one query of the `INSERT` type is supported.
+In one `predefined_query_handler`, only one query is supported.
:::
### dynamic_query_handler {#dynamic_query_handler}

@ -2776,7 +2776,7 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
- 0 — Row numbers are not output.
- 1 — Row numbers are output.

-Default value: `0`.
+Default value: `1`.

**Example**

@ -2798,7 +2798,7 @@ SELECT TOP 3 name, value FROM system.settings;
```
### output_format_pretty_color {#output_format_pretty_color}

Enables/disables ANSI escape sequences in Pretty formats.

Possible values:

@ -4123,7 +4123,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca
## session_timezone {#session_timezone}

Sets the default time zone (session_timezone) for the current session instead of the [server time zone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). That is, all DateTime/DateTime64 values for which a time zone is not explicitly set will be interpreted as belonging to the specified zone.
With the setting value `''` (empty string), the session time zone matches the server time zone.

The functions `timeZone()` and `serverTimezone()` return the time zone of the current session and of the server, respectively.

@ -120,7 +120,7 @@ FROM dt
- [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
- [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
- [The `date_time_input_format` setting](../../operations/settings/index.md#settings-date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/index.md)
+- [The `date_time_output_format` setting](../../operations/settings/index.md#settings-date_time_output_format)
- [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
- [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
- [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)

@ -427,29 +427,32 @@ $ curl -v 'http://localhost:8123/predefined_query'
``` xml
<http_handlers>
    <rule>
-        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
-        <method>GET</method>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
+        <methods>GET</methods>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
        </headers>
        <handler>
            <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
        </handler>
    </rule>
    <defaults/>
</http_handlers>
```

``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
```

:::warning
-In one `predefined_query_handler`, only one `query` of insert type is supported.
+In one `predefined_query_handler`, only one `query` is supported.
:::

### Dynamic query handler {#dynamic_query_handler}

@ -30,10 +30,6 @@ conflicts:
contents:
- src: root/usr/lib/debug/usr/bin/clickhouse.debug
  dst: /usr/lib/debug/usr/bin/clickhouse.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-common-static-dbg/AUTHORS

@ -36,10 +36,6 @@ contents:
  dst: /usr/bin/clickhouse
- src: root/usr/bin/clickhouse-extract-from-config
  dst: /usr/bin/clickhouse-extract-from-config
-- src: root/usr/bin/clickhouse-library-bridge
-  dst: /usr/bin/clickhouse-library-bridge
-- src: root/usr/bin/clickhouse-odbc-bridge
-  dst: /usr/bin/clickhouse-odbc-bridge
- src: root/usr/share/bash-completion/completions
  dst: /usr/share/bash-completion/completions
- src: root/usr/share/clickhouse
35 packages/clickhouse-library-bridge.yaml Normal file
@ -0,0 +1,35 @@
# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-library-bridge"
description: |
  ClickHouse Library Bridge is a separate process for loading libraries for the 'library' dictionary sources and the CatBoost library.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.

# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse

# Package specific content
contents:
- src: root/usr/bin/clickhouse-library-bridge
  dst: /usr/bin/clickhouse-library-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-library-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-library-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-library-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-library-bridge/README.md

35 packages/clickhouse-odbc-bridge.yaml Normal file
@ -0,0 +1,35 @@
# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-odbc-bridge"
description: |
  ClickHouse ODBC Bridge is a separate process for loading ODBC drivers and interacting with external databases using the ODBC protocol.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.

# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse

# Package specific content
contents:
- src: root/usr/bin/clickhouse-odbc-bridge
  dst: /usr/bin/clickhouse-odbc-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-odbc-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-odbc-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/README.md

@ -237,7 +237,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
        ASTPtr res = parseQueryAndMovePosition(
            parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);

        std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
        std::unique_ptr<ReadBuffer> insert_query_payload;
        /// If the query is INSERT ... VALUES, then we will try to parse the data.
        if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
        {

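A note on the `= nullptr` initializer dropped above (and in similar hunks below): a default-constructed `std::unique_ptr` is already null, so the explicit initializer is redundant. A minimal standalone sketch:

``` cpp
#include <cassert>
#include <memory>

int main()
{
    std::unique_ptr<int> p; /// default-constructed: already holds nullptr
    assert(p == nullptr);
    assert(!p); /// same check via the explicit bool conversion
}
```
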
@ -662,7 +662,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            " <server>\n"
            " <certificateFile>" << (config_dir / "server.crt").string() << "</certificateFile>\n"
            " <privateKeyFile>" << (config_dir / "server.key").string() << "</privateKeyFile>\n"
            " <dhParamsFile>" << (config_dir / "dhparam.pem").string() << "</dhParamsFile>\n"
            " </server>\n"
            " </openSSL>\n"
            "</clickhouse>\n";

@ -24,9 +24,4 @@ target_link_libraries(clickhouse-library-bridge PRIVATE

set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)

if (SPLIT_DEBUG_SYMBOLS)
    clickhouse_split_debug_symbols(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-library-bridge)
else()
    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
    install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()

install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

@ -35,7 +35,7 @@ public:
    ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib

private:
    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder = nullptr;
    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder;
    Container strings_holder;
};

@ -30,12 +30,7 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro)

if (SPLIT_DEBUG_SYMBOLS)
    clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge)
else()
    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
    install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()

install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

if(ENABLE_TESTS)
    add_subdirectory(tests)

@ -12,6 +12,7 @@
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/NumberParser.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/quoteString.h>

@ -5,7 +5,6 @@
#if USE_ODBC

#include <Interpreters/Context_fwd.h>
#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandler.h>
#include <Poco/Logger.h>

@ -734,13 +734,17 @@ try
    LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info);
#endif

    bool will_have_trace_collector = hasPHDRCache() && config().has("trace_log");

    // Initialize global thread pool. Do it before we fetch configs from zookeeper
    // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
    // ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
    GlobalThreadPool::initialize(
        server_settings.max_thread_pool_size,
        server_settings.max_thread_pool_free_size,
        server_settings.thread_pool_queue_size);
        server_settings.thread_pool_queue_size,
        will_have_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0,
        will_have_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0);
    /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
    SCOPE_EXIT({
        Stopwatch watch;

@ -96,7 +96,7 @@
        <to>https://{bucket}.s3.amazonaws.com</to>
    </s3>
    <gs>
        <to>https://{bucket}.storage.googleapis.com</to>
        <to>https://storage.googleapis.com/{bucket}</to>
    </gs>
    <oss>
        <to>https://{bucket}.oss.aliyuncs.com</to>

@ -16,6 +16,8 @@
#include <IO/ReadBufferFromString.h>
#include <Poco/UUIDGenerator.h>
#include <base/insertAtEnd.h>

#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>

namespace fs = std::filesystem;

@ -115,34 +115,34 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
        this->data(place).add(*columns[0], row_num, arena);
        data(place).add(*columns[0], row_num, arena);
    }

    void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
    {
        this->data(place).addManyDefaults(*columns[0], 0, arena);
        data(place).addManyDefaults(*columns[0], 0, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        this->data(place).add(this->data(rhs), arena);
        data(place).add(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).write(buf, *serialization);
        data(place).write(buf, *serialization);
    }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
        this->data(place).read(buf, *serialization, arena);
        data(place).read(buf, *serialization, arena);
    }

    bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        this->data(place).insertResultInto(to);
        data(place).insertResultInto(to);
    }
};

@ -559,7 +559,7 @@ public:
            ptr = ptrs[row_num];
        }

        this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
        data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
    }

    void addManyDefaults(

@ -572,7 +572,7 @@ public:

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        this->data(place).merge(this->data(rhs), arena);
        data(place).merge(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override

@ -590,7 +590,7 @@ public:
        auto & array = assert_cast<ColumnArray &>(to);
        auto & str = assert_cast<ColumnString &>(array.getData());

        this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
        data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);

        array.getOffsets().push_back(str.size());
    }

|
||||
chassert(lim != 0);
|
||||
|
||||
/// With a large number of values, we will generate random numbers several times slower.
|
||||
if (lim <= static_cast<UInt64>(rng.max()))
|
||||
if (lim <= static_cast<UInt64>(pcg32_fast::max()))
|
||||
return rng() % lim;
|
||||
else
|
||||
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
|
||||
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
|
||||
}
|
||||
|
||||
void randomShuffle()
|
||||
|
@ -242,7 +242,7 @@ public:
|
||||
{
|
||||
Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type);
|
||||
Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type);
|
||||
this->data(place).add(x, y, arena);
|
||||
data(place).add(x, y, arena);
|
||||
}
|
||||
|
||||
Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const
|
||||
@ -264,25 +264,25 @@ public:
|
||||
|
||||
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
|
||||
{
|
||||
auto & a = this->data(place);
|
||||
const auto & b = this->data(rhs);
|
||||
auto & a = data(place);
|
||||
const auto & b = data(rhs);
|
||||
|
||||
a.merge(b, arena);
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
this->data(place).write(buf);
|
||||
data(place).write(buf);
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
|
||||
{
|
||||
this->data(place).read(buf, arena);
|
||||
data(place).read(buf, arena);
|
||||
}
|
||||
|
||||
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
|
||||
{
|
||||
auto res = this->data(place).getResult(total_buckets, arena);
|
||||
auto res = data(place).getResult(total_buckets, arena);
|
||||
|
||||
auto & col = assert_cast<ColumnArray &>(to);
|
||||
auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
|
||||
|
@ -205,35 +205,35 @@ public:
        UInt8 is_second = columns[1]->getUInt(row_num);

        if (is_second)
            this->data(place).addY(value, arena);
            data(place).addY(value, arena);
        else
            this->data(place).addX(value, arena);
            data(place).addX(value, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        auto & a = this->data(place);
        const auto & b = this->data(rhs);
        auto & a = data(place);
        const auto & b = data(rhs);

        a.merge(b, arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).write(buf);
        data(place).write(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
        this->data(place).read(buf, arena);
        data(place).read(buf, arena);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        if (!this->data(place).size_x || !this->data(place).size_y)
        if (!data(place).size_x || !data(place).size_y)
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());

        auto [u_statistic, p_value] = this->data(place).getResult(alternative, continuity_correction);
        auto [u_statistic, p_value] = data(place).getResult(alternative, continuity_correction);

        /// Because p-value is a probability.
        p_value = std::min(1.0, std::max(0.0, p_value));

@ -66,31 +66,31 @@ public:
    {
        Float64 new_x = columns[0]->getFloat64(row_num);
        Float64 new_y = columns[1]->getFloat64(row_num);
        this->data(place).addX(new_x, arena);
        this->data(place).addY(new_y, arena);
        data(place).addX(new_x, arena);
        data(place).addY(new_y, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        auto & a = this->data(place);
        const auto & b = this->data(rhs);
        auto & a = data(place);
        const auto & b = data(rhs);

        a.merge(b, arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).write(buf);
        data(place).write(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
        this->data(place).read(buf, arena);
        data(place).read(buf, arena);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        auto answer = this->data(place).getResult();
        auto answer = data(place).getResult();

        auto & column = static_cast<ColumnVector<Float64> &>(to);
        column.getData().push_back(answer);

@ -102,24 +102,24 @@ public:
        auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
        if (event)
        {
            this->data(place).add(i);
            data(place).add(i);
        }
    }
}

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
        this->data(place).merge(this->data(rhs));
        data(place).merge(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).serialize(buf);
        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        this->data(place).deserialize(buf);
        data(place).deserialize(buf);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

@ -130,13 +130,13 @@ public:
        ColumnArray::Offset current_offset = data_to.size();
        data_to.resize(current_offset + events_size);

        const bool first_flag = this->data(place).events.test(0);
        const bool first_flag = data(place).events.test(0);
        data_to[current_offset] = first_flag;
        ++current_offset;

        for (size_t i = 1; i < events_size; ++i)
        {
            data_to[current_offset] = (first_flag && this->data(place).events.test(i));
            data_to[current_offset] = (first_flag && data(place).events.test(i));
            ++current_offset;
        }

@ -123,22 +123,22 @@ public:
        Float64 x = columns[0]->getFloat64(row_num);
        Float64 y = columns[1]->getFloat64(row_num);

        this->data(place).add(x, y);
        data(place).add(x, y);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
        this->data(place).merge(this->data(rhs));
        data(place).merge(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).serialize(buf);
        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        this->data(place).deserialize(buf);
        data(place).deserialize(buf);
    }

    static DataTypePtr createResultType()

@ -168,8 +168,8 @@ public:
        IColumn & to,
        Arena *) const override
    {
        Float64 k = this->data(place).getK();
        Float64 b = this->data(place).getB(k);
        Float64 k = data(place).getK();
        Float64 b = data(place).getB(k);

        auto & col_tuple = assert_cast<ColumnTuple &>(to);
        auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0));

@ -120,7 +120,7 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
    {
        this->data(place).add(*columns[0], row_num, arena);
        data(place).add(*columns[0], row_num, arena);
    }

    void addBatchSinglePlace(

@ -131,7 +131,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override
    {
        if (this->data(place).isNull())
        if (data(place).isNull())
            return;
        IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace(
            row_begin, row_end, place, columns, arena, if_argument_pos);

@ -146,7 +146,7 @@ public:
        Arena * arena,
        ssize_t if_argument_pos) const override
    {
        if (this->data(place).isNull())
        if (data(place).isNull())
            return;
        IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull(
            row_begin, row_end, place, columns, null_map, arena, if_argument_pos);

@ -154,29 +154,29 @@ public:

    void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
    {
        this->data(place).add(*columns[0], 0, arena);
        data(place).add(*columns[0], 0, arena);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
    {
        this->data(place).add(this->data(rhs), arena);
        data(place).add(data(rhs), arena);
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).write(buf, *serialization);
        data(place).write(buf, *serialization);
    }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
    {
        this->data(place).read(buf, *serialization, arena);
        data(place).read(buf, *serialization, arena);
    }

    bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
    {
        this->data(place).insertResultInto(to);
        data(place).insertResultInto(to);
    }
};

@ -150,13 +150,13 @@ private:

    Float64 getResult(ConstAggregateDataPtr __restrict place) const
    {
        const auto & data = this->data(place);
        const auto & dt = data(place);
        switch (kind)
        {
            case VarKind::varSampStable: return getVarSamp(data.m2, data.count);
            case VarKind::stddevSampStable: return getStddevSamp(data.m2, data.count);
            case VarKind::varPopStable: return getVarPop(data.m2, data.count);
            case VarKind::stddevPopStable: return getStddevPop(data.m2, data.count);
            case VarKind::varSampStable: return getVarSamp(dt.m2, dt.count);
            case VarKind::stddevSampStable: return getStddevSamp(dt.m2, dt.count);
            case VarKind::varPopStable: return getVarPop(dt.m2, dt.count);
            case VarKind::stddevPopStable: return getStddevPop(dt.m2, dt.count);
        }
    }

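For reference, the estimators these cases dispatch to, assuming `m2` is the Welford-style accumulated sum of squared deviations from the running mean and `count` is the number of values; the NaN guards for tiny counts are an assumption of this sketch, not copied from the source:

``` cpp
#include <cmath>
#include <iostream>

double getVarSamp(double m2, double count) { return count <= 1 ? NAN : m2 / (count - 1); }
double getVarPop(double m2, double count) { return count == 0 ? NAN : m2 / count; }
double getStddevSamp(double m2, double count) { return std::sqrt(getVarSamp(m2, count)); }
double getStddevPop(double m2, double count) { return std::sqrt(getVarPop(m2, count)); }

int main()
{
    /// For values {1, 2, 3}: mean = 2, m2 = 2, so varSamp = 1 and varPop = 2/3.
    std::cout << getVarSamp(2.0, 3) << ' ' << getVarPop(2.0, 3) << '\n';
}
```
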
@ -182,22 +182,22 @@ public:

    void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
    {
        this->data(place).update(*columns[0], row_num);
        data(place).update(*columns[0], row_num);
    }

    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
    {
        this->data(place).mergeWith(this->data(rhs));
        data(place).mergeWith(data(rhs));
    }

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).serialize(buf);
        data(place).serialize(buf);
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        this->data(place).deserialize(buf);
        data(place).deserialize(buf);
    }

    void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

@ -491,7 +491,7 @@ public:
        std::vector<const UInt8 *> nullable_filters;
        const IColumn * nested_columns[number_of_arguments];

        std::unique_ptr<UInt8[]> final_flags = nullptr;
        std::unique_ptr<UInt8[]> final_flags;
        const UInt8 * final_flags_ptr = nullptr;

        if (if_argument_pos >= 0)

@ -1,17 +1,18 @@
#pragma once

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnsNumber.h>
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Core/IResolvedFunction.h>
#include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h>
#include <base/types.h>
#include <Common/Exception.h>
#include <Common/ThreadPool_fwd.h>
#include <Core/IResolvedFunction.h>

#include "config.h"

@ -46,13 +47,6 @@ class IWindowFunction;
using DataTypePtr = std::shared_ptr<const IDataType>;
using DataTypes = std::vector<DataTypePtr>;

using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;

class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;

struct AggregateFunctionProperties;

/** Aggregate functions interface.

14 src/AggregateFunctions/IAggregateFunction_fwd.h Normal file
@ -0,0 +1,14 @@
#pragma once

#include <memory>
#include <vector>

namespace DB
{
using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;

class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
}

@ -258,10 +258,10 @@ private:
    chassert(limit > 0);

    /// With a large number of values, we will generate random numbers several times slower.
    if (limit <= static_cast<UInt64>(rng.max()))
    if (limit <= static_cast<UInt64>(pcg32_fast::max()))
        return rng() % limit;
    else
        return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
        return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
}

void sortIfNeeded()

@ -579,7 +579,7 @@ std::optional<size_t> SingleValueDataFixed<T>::getGreatestIndexNotNullIf(
        return std::nullopt;

    for (size_t i = index + 1; i < row_end; i++)
        if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] < vec[index]))
        if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] > vec[index]))
            index = i;
    return {index};
}

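The one-character fix above (`<` to `>`) is easy to miss: the loop was tracking the smallest value although the function promises the index of the greatest one. A standalone sketch of the corrected scan, with illustrative types and without the starting-index plumbing of the original:

``` cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

/// Index of the greatest element, honoring an optional condition map and null map.
std::optional<size_t> getGreatestIndex(
    const std::vector<int> & vec, const uint8_t * if_map, const uint8_t * null_map)
{
    std::optional<size_t> index;
    for (size_t i = 0; i < vec.size(); ++i)
    {
        if ((if_map && if_map[i] == 0) || (null_map && null_map[i] != 0))
            continue; /// row filtered out or NULL
        if (!index || vec[i] > vec[*index]) /// '>' -- the fixed comparison
            index = i;
    }
    return index;
}

int main()
{
    std::vector<int> v{3, 9, 4};
    std::cout << getGreatestIndex(v, nullptr, nullptr).value() << '\n'; /// prints 1
}
```
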
@ -1,14 +1,12 @@
#include <Analyzer/ArrayJoinNode.h>

#include <Analyzer/ColumnNode.h>
#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTExpressionList.h>

#include <Analyzer/Utils.h>
#include <Analyzer/ColumnNode.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Common/assert_cast.h>

namespace DB
{

@ -1,14 +1,12 @@
#include <Analyzer/ColumnNode.h>

#include <Common/SipHash.h>

#include <Analyzer/TableNode.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTIdentifier.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>

#include <Analyzer/TableNode.h>

namespace DB
{

@ -1,16 +1,14 @@
#include <Analyzer/JoinNode.h>
#include <Analyzer/ListNode.h>

#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>

#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTTablesInSelectQuery.h>

#include <Analyzer/Utils.h>
#include <Common/assert_cast.h>

namespace DB
{

@ -1940,8 +1940,7 @@ std::vector<String> QueryAnalyzer::collectIdentifierTypoHints(const Identifier &
    for (const auto & valid_identifier : valid_identifiers)
        prompting_strings.push_back(valid_identifier.getFullName());

    NamePrompter<1> prompter;
    return prompter.getHints(unresolved_identifier.getFullName(), prompting_strings);
    return NamePrompter<1>::getHints(unresolved_identifier.getFullName(), prompting_strings);
}

/** Wrap expression node in tuple element function calls for nested paths.

@ -3993,9 +3992,15 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
    }
    else if (resolved_identifier->as<ConstantNode>())
    {
        lookup_result.resolved_identifier = resolved_identifier;
        return lookup_result;
    }
    else if (auto * resolved_function = resolved_identifier->as<FunctionNode>())
    {
        /// Special case: scalar subquery was executed and replaced by __getScalar function.
        /// Handle it as a constant.
        if (resolved_function->getFunctionName() == "__getScalar")
            return lookup_result;
    }

    throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
        "Resolve identifier '{}' from parent scope only supported for constants and CTE. Actual {} node type {}. In scope {}",

@ -6083,7 +6088,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
     * Example: SELECT toTypeName(sum(number)) FROM numbers(10);
     */
    if (column && isColumnConst(*column) && !typeid_cast<const ColumnConst *>(column.get())->getDataColumn().isDummy() &&
        (!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin")))
        !hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin") &&
        /// Sanity check: do not convert large columns to constants
        column->byteSize() < 1_MiB)
    {
        /// Replace function node with result constant node
        Field column_constant_value;

@ -29,7 +29,8 @@ NamesAndTypes extractProjectionColumnsForGroupBy(const QueryNode * query_node)
        return {};

    NamesAndTypes result;
    for (const auto & group_by_ele : query_node->getGroupByNode()->getChildren())
    const auto & group_by_elements = query_node->getGroupByNode()->getChildren();
    for (const auto & group_by_element : group_by_elements)
    {
        const auto & projection_columns = query_node->getProjectionColumns();
        const auto & projection_nodes = query_node->getProjection().getNodes();

@ -38,10 +39,18 @@ NamesAndTypes extractProjectionColumnsForGroupBy(const QueryNode * query_node)

        for (size_t i = 0; i < projection_columns.size(); i++)
        {
            if (projection_nodes[i]->isEqual(*group_by_ele))
            if (projection_nodes[i]->isEqual(*group_by_element))
            {
                result.push_back(projection_columns[i]);
                break;
            }
        }
    }
    /// If some group by keys are not matched, we cannot apply optimization,
    /// because prefix of group by keys may not be unique.
    if (result.size() != group_by_elements.size())
        return {};

    return result;
}

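The added length check is the crux of the fix: every GROUP BY key must find a matching projection column, otherwise the optimization is abandoned because a matched prefix of the keys need not be unique. A sketch of the same matching step, with strings standing in for query tree nodes:

``` cpp
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> matchGroupByToProjection(
    const std::vector<std::string> & group_by, const std::vector<std::string> & projection)
{
    std::vector<std::string> result;
    for (const auto & key : group_by)
        for (const auto & column : projection)
            if (column == key)
            {
                result.push_back(column);
                break;
            }
    if (result.size() != group_by.size())
        return {}; /// some key unmatched -> cannot apply the optimization
    return result;
}

int main()
{
    std::cout << matchGroupByToProjection({"a", "b"}, {"b", "a", "c"}).size() << '\n'; /// 2
    std::cout << matchGroupByToProjection({"a", "d"}, {"b", "a", "c"}).size() << '\n'; /// 0
}
```
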
@ -444,8 +444,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
            nulls_sort_direction = order_by_element.nulls_direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING;

        std::shared_ptr<Collator> collator;
        if (order_by_element.collation)
            collator = std::make_shared<Collator>(order_by_element.collation->as<ASTLiteral &>().value.get<String &>());
        if (order_by_element.getCollation())
            collator = std::make_shared<Collator>(order_by_element.getCollation()->as<ASTLiteral &>().value.get<String &>());

        const auto & sort_expression_ast = order_by_element.children.at(0);
        auto sort_expression = buildExpression(sort_expression_ast, context);

@ -455,12 +455,12 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
            std::move(collator),
            order_by_element.with_fill);

        if (order_by_element.fill_from)
            sort_node->getFillFrom() = buildExpression(order_by_element.fill_from, context);
        if (order_by_element.fill_to)
            sort_node->getFillTo() = buildExpression(order_by_element.fill_to, context);
        if (order_by_element.fill_step)
            sort_node->getFillStep() = buildExpression(order_by_element.fill_step, context);
        if (order_by_element.getFillFrom())
            sort_node->getFillFrom() = buildExpression(order_by_element.getFillFrom(), context);
        if (order_by_element.getFillTo())
            sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context);
        if (order_by_element.getFillStep())
            sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context);

        list_node->getNodes().push_back(std::move(sort_node));
    }

@ -558,7 +558,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression, co
    }
    else if (const auto * function = expression->as<ASTFunction>())
    {
        if (function->is_lambda_function)
        if (function->is_lambda_function || isASTLambdaFunction(*function))
        {
            const auto & lambda_arguments_and_expression = function->arguments->as<ASTExpressionList &>().children;
            auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as<ASTFunction &>();

@ -120,17 +120,18 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const

    result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value();

    result->with_fill = with_fill;
    result->fill_from = hasFillFrom() ? getFillFrom()->toAST(options) : nullptr;
    result->fill_to = hasFillTo() ? getFillTo()->toAST(options) : nullptr;
    result->fill_step = hasFillStep() ? getFillStep()->toAST(options) : nullptr;
    result->children.push_back(getExpression()->toAST(options));

    if (collator)
    {
        result->children.push_back(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
        result->collation = result->children.back();
    }
        result->setCollation(std::make_shared<ASTLiteral>(Field(collator->getLocale())));

    result->with_fill = with_fill;
    if (hasFillFrom())
        result->setFillFrom(getFillFrom()->toAST(options));
    if (hasFillTo())
        result->setFillTo(getFillTo()->toAST(options));
    if (hasFillStep())
        result->setFillStep(getFillStep()->toAST(options));

    return result;
}

@ -1,11 +1,9 @@
#include <Analyzer/WindowNode.h>

#include <Common/SipHash.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

#include <IO/WriteBufferFromString.h>
#include <Parsers/ASTWindowDefinition.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>

namespace DB
{

@ -1,22 +1,25 @@
#include <Access/Common/AccessEntityType.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromMemory.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupUtils.h>
#include <Backups/DDLAdjustingForBackupVisitor.h>
#include <Backups/IBackupCoordination.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <Access/Common/AccessEntityType.h>
#include <base/chrono_io.h>
#include <base/insertAtEnd.h>
#include <base/scope_guard.h>
#include <base/sleep.h>
#include <Common/escapeForFileName.h>

#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <base/scope_guard.h>

#include <filesystem>

namespace fs = std::filesystem;

@ -124,11 +124,12 @@ BackupReaderS3::BackupReaderS3(
    bool allow_s3_native_copy,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
    const ContextPtr & context_)
    const ContextPtr & context_,
    bool is_internal_backup)
    : BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderS3"))
    , s3_uri(s3_uri_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{
    auto & request_settings = s3_settings.request_settings;
    request_settings.updateFromSettings(context_->getSettingsRef());

@ -214,11 +215,12 @@ BackupWriterS3::BackupWriterS3(
    const String & storage_class_name,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
    const ContextPtr & context_)
    const ContextPtr & context_,
    bool is_internal_backup)
    : BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterS3"))
    , s3_uri(s3_uri_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName()))
    , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{
    auto & request_settings = s3_settings.request_settings;
    request_settings.updateFromSettings(context_->getSettingsRef());

@ -18,7 +18,15 @@ namespace DB
class BackupReaderS3 : public BackupReaderDefault
{
public:
    BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
    BackupReaderS3(
        const S3::URI & s3_uri_,
        const String & access_key_id_,
        const String & secret_access_key_,
        bool allow_s3_native_copy,
        const ReadSettings & read_settings_,
        const WriteSettings & write_settings_,
        const ContextPtr & context_,
        bool is_internal_backup);
    ~BackupReaderS3() override;

    bool fileExists(const String & file_name) override;

@ -41,7 +49,16 @@ private:
class BackupWriterS3 : public BackupWriterDefault
{
public:
    BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
    BackupWriterS3(
        const S3::URI & s3_uri_,
        const String & access_key_id_,
        const String & secret_access_key_,
        bool allow_s3_native_copy,
        const String & storage_class_name,
        const ReadSettings & read_settings_,
        const WriteSettings & write_settings_,
        const ContextPtr & context_,
        bool is_internal_backup);
    ~BackupWriterS3() override;

    bool fileExists(const String & file_name) override;

@ -27,6 +27,8 @@
#include <Common/scope_guard_safe.h>
#include <Common/ThreadPool.h>

#include <boost/range/adaptor/map.hpp>


namespace CurrentMetrics
{

@ -940,6 +942,7 @@ void BackupsWorker::doRestore(
    backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
    backup_open_params.read_settings = getReadSettingsForRestore(context);
    backup_open_params.write_settings = getWriteSettingsForRestore(context);
    backup_open_params.is_internal_backup = restore_settings.internal;
    BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);

    String current_database = context->getCurrentDatabase();

@ -24,6 +24,9 @@
#include <Common/escapeForFileName.h>
#include <base/insertAtEnd.h>
#include <boost/algorithm/string/join.hpp>

#include <boost/range/adaptor/map.hpp>

#include <filesystem>
#include <ranges>

@ -110,7 +110,8 @@ void registerBackupEngineS3(BackupFactory & factory)
                params.allow_s3_native_copy,
                params.read_settings,
                params.write_settings,
                params.context);
                params.context,
                params.is_internal_backup);

            return std::make_unique<BackupImpl>(
                params.backup_info,

@ -129,7 +130,8 @@ void registerBackupEngineS3(BackupFactory & factory)
                params.s3_storage_class,
                params.read_settings,
                params.write_settings,
                params.context);
                params.context,
                params.is_internal_backup);

            return std::make_unique<BackupImpl>(
                params.backup_info,

@ -1964,7 +1964,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
    }

    /// INSERT query for which data transfer is needed (not an INSERT SELECT or input()) is processed separately.
    if (insert && (!insert->select || input_function) && !insert->watch && !is_async_insert_with_inlined_data)
    if (insert && (!insert->select || input_function) && !is_async_insert_with_inlined_data)
    {
        if (input_function && insert->format.empty())
            throw Exception(ErrorCodes::INVALID_USAGE_OF_INPUT, "FORMAT must be specified for function input()");

@ -1,7 +1,13 @@
#include <Columns/ColumnAggregateFunction.h>

#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/AlignedBuffer.h>
#include <Common/Arena.h>
#include <Common/FieldVisitorToString.h>

@ -11,10 +17,6 @@
#include <Common/assert_cast.h>
#include <Common/iota.h>
#include <Common/typeid_cast.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>


namespace DB

@ -109,6 +111,11 @@ ConstArenas concatArenas(const ConstArenas & array, ConstArenaPtr arena)

}

std::string ColumnAggregateFunction::getName() const
{
    return "AggregateFunction(" + func->getName() + ")";
}

MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr column)
{
    /** If the aggregate function returns an unfinalized/unfinished state,

@ -1,17 +1,9 @@
#pragma once

#include <AggregateFunctions/IAggregateFunction.h>

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/IColumn.h>
#include <Common/PODArray.h>

#include <Core/Field.h>

#include <IO/ReadBufferFromString.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>

#include <Functions/FunctionHelpers.h>
#include <Common/PODArray.h>

namespace DB
{

@ -26,6 +18,12 @@ using ArenaPtr = std::shared_ptr<Arena>;
using ConstArenaPtr = std::shared_ptr<const Arena>;
using ConstArenas = std::vector<ConstArenaPtr>;

class Context;
using ContextPtr = std::shared_ptr<const Context>;

struct ColumnWithTypeAndName;
using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;


/** Column of states of aggregate functions.
 * Presented as an array of pointers to the states of aggregate functions (data).

@ -121,7 +119,7 @@ public:
    /// This method is made static and receive MutableColumnPtr object to explicitly destroy it.
    static MutableColumnPtr convertToValues(MutableColumnPtr column);

    std::string getName() const override { return "AggregateFunction(" + func->getName() + ")"; }
    std::string getName() const override;
    const char * getFamilyName() const override { return "AggregateFunction"; }
    TypeIndex getDataType() const override { return TypeIndex::AggregateFunction; }

|
||||
#pragma once
|
||||
|
||||
#include <Columns/IColumn.h>
|
||||
#include <Core/Field.h>
|
||||
#include <Core/Names.h>
|
||||
#include <Columns/IColumn.h>
|
||||
#include <Common/PODArray.h>
|
||||
#include <Common/HashTable/HashMap.h>
|
||||
#include <DataTypes/Serializations/JSONDataParser.h>
|
||||
#include <DataTypes/Serializations/SubcolumnsTree.h>
|
||||
#include <Common/PODArray.h>
|
||||
|
||||
#include <DataTypes/IDataType.h>
|
||||
|
||||
|
@ -346,7 +346,7 @@ ColumnPtr ColumnSparse::filter(const Filter & filt, ssize_t) const
    }

    auto res_values = values->filter(values_filter, values_result_size_hint);
    return this->create(res_values, std::move(res_offsets), res_offset);
    return create(res_values, std::move(res_offsets), res_offset);
}

void ColumnSparse::expand(const Filter & mask, bool inverted)

@ -671,7 +671,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
                ReadableSize(rss),
                ReadableSize(difference));

            total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas);
            MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
        }
    }

@ -10,6 +10,8 @@
#include <IO/ReadHelpers.h>
#include <base/JSON.h>

#include <boost/range/adaptor/map.hpp>


namespace fs = std::filesystem;

@ -203,7 +203,7 @@ public:
        if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until)
        {
            LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit);
            mute_warning_until = roundUp(total_connections_in_group, limits.warning_step);
            mute_warning_until = roundUp(total_connections_in_group, HTTPConnectionPools::Limits::warning_step);
        }
    }

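The change above only makes the call explicitly static, but the muting logic is worth a note. Assuming `roundUp` rounds a value up to the next multiple of the step (an assumption of this sketch, not copied from the source), a warning fired at N connections stays muted until the count crosses the next multiple of the warning step:

``` cpp
#include <cstddef>
#include <iostream>

size_t roundUp(size_t value, size_t step) { return (value / step + 1) * step; }

int main()
{
    /// Warned at 103 active sessions with step 100: mute until the count reaches 200.
    std::cout << roundUp(103, 100) << '\n'; /// 200
}
```
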
@ -207,7 +207,7 @@ public:
    void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
    {
        DB::PrefetchingHelper prefetching;
        size_t prefetch_look_ahead = prefetching.getInitialLookAheadValue();
        size_t prefetch_look_ahead = DB::PrefetchingHelper::getInitialLookAheadValue();

        size_t i = 0;
        auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead);

@ -216,10 +216,10 @@ public:
        {
            if constexpr (prefetch)
            {
                if (i == prefetching.iterationsToMeasure())
                if (i == DB::PrefetchingHelper::iterationsToMeasure())
                {
                    prefetch_look_ahead = prefetching.calcPrefetchLookAhead();
                    prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - prefetching.getInitialLookAheadValue());
                    prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - DB::PrefetchingHelper::getInitialLookAheadValue());
                }

                if (prefetch_it != end)

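Same static-call cleanup here; the surrounding loop is a look-ahead software-prefetch pattern. A standalone sketch of the idea over a flat array (the hash-table iterator advance and the adaptive look-ahead measurement are elided):

``` cpp
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<long long> data(1 << 20);
    std::iota(data.begin(), data.end(), 0);

    constexpr size_t look_ahead = 16; /// plays the role of getInitialLookAheadValue()
    long long sum = 0;
    for (size_t i = 0; i < data.size(); ++i)
    {
        /// While processing element i, pull the element look_ahead positions
        /// ahead into cache so it is resident when the loop reaches it.
        if (i + look_ahead < data.size())
            __builtin_prefetch(&data[i + look_ahead]);
        sum += data[i];
    }
    std::cout << sum << '\n';
}
```
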
@ -211,23 +211,13 @@ void Timer::cleanup()
#endif

template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_)
QueryProfilerBase<ProfilerImpl>::QueryProfilerBase([[maybe_unused]] UInt64 thread_id, [[maybe_unused]] int clock_type, [[maybe_unused]] UInt32 period, [[maybe_unused]] int pause_signal_)
    : log(getLogger("QueryProfiler"))
    , pause_signal(pause_signal_)
{
#if defined(SANITIZER)
    UNUSED(thread_id);
    UNUSED(clock_type);
    UNUSED(period);
    UNUSED(pause_signal);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
#elif defined(__APPLE__)
    UNUSED(thread_id);
    UNUSED(clock_type);
    UNUSED(period);
    UNUSED(pause_signal);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work on OSX");
#else
    /// Sanity check.

@ -261,6 +251,20 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(UInt64 thread_id, int clock_t
#endif
}


template <typename ProfilerImpl>
void QueryProfilerBase<ProfilerImpl>::setPeriod([[maybe_unused]] UInt32 period_)
{
#if defined(SANITIZER)
    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler disabled because they cannot work under sanitizers");
#elif defined(__APPLE__)
    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QueryProfiler cannot work on OSX");
#else
    timer.set(period_);
#endif
}

template <typename ProfilerImpl>
QueryProfilerBase<ProfilerImpl>::~QueryProfilerBase()
{

@ -57,6 +57,8 @@ public:
    QueryProfilerBase(UInt64 thread_id, int clock_type, UInt32 period, int pause_signal_);
    ~QueryProfilerBase();

    void setPeriod(UInt32 period_);

private:
    void cleanup();

@ -366,7 +366,7 @@ String demangleAndCollapseNames(std::optional<std::string_view> file, const char
    if (file.has_value())
    {
        std::string_view file_copy = file.value();
        if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != file_copy.npos)
        if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != std::string_view::npos)
            file_copy.remove_suffix(file_copy.size() - trim_pos);
        if (file_copy.ends_with("functional"))
            return "?";

@ -490,8 +490,9 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_


template class ThreadPoolImpl<std::thread>;
template class ThreadPoolImpl<ThreadFromGlobalPoolImpl<false>>;
template class ThreadFromGlobalPoolImpl<true>;
template class ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>;
template class ThreadFromGlobalPoolImpl<true, true>;
template class ThreadFromGlobalPoolImpl<true, false>;

std::unique_ptr<GlobalThreadPool> GlobalThreadPool::the_instance;

@ -500,7 +501,9 @@ GlobalThreadPool::GlobalThreadPool(
    size_t max_threads_,
    size_t max_free_threads_,
    size_t queue_size_,
    const bool shutdown_on_exception_)
    const bool shutdown_on_exception_,
    UInt64 global_profiler_real_time_period_ns_,
    UInt64 global_profiler_cpu_time_period_ns_)
    : FreeThreadPool(
        CurrentMetrics::GlobalThread,
        CurrentMetrics::GlobalThreadActive,

@ -509,10 +512,12 @@ GlobalThreadPool::GlobalThreadPool(
        max_free_threads_,
        queue_size_,
        shutdown_on_exception_)
    , global_profiler_real_time_period_ns(global_profiler_real_time_period_ns_)
    , global_profiler_cpu_time_period_ns(global_profiler_cpu_time_period_ns_)
{
}

void GlobalThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size)
void GlobalThreadPool::initialize(size_t max_threads, size_t max_free_threads, size_t queue_size, UInt64 global_profiler_real_time_period_ns, UInt64 global_profiler_cpu_time_period_ns)
{
    if (the_instance)
    {

@ -520,7 +525,7 @@ void GlobalThreadPool::initialize(size_t max_threads, size_t max_free_threads, s
            "The global thread pool is initialized twice");
    }

    the_instance.reset(new GlobalThreadPool(max_threads, max_free_threads, queue_size, false /*shutdown_on_exception*/));
    the_instance.reset(new GlobalThreadPool(max_threads, max_free_threads, queue_size, false /*shutdown_on_exception*/, global_profiler_real_time_period_ns, global_profiler_cpu_time_period_ns));
}

GlobalThreadPool & GlobalThreadPool::instance()

@ -172,10 +172,21 @@ class GlobalThreadPool : public FreeThreadPool, private boost::noncopyable
        size_t max_threads_,
        size_t max_free_threads_,
        size_t queue_size_,
        bool shutdown_on_exception_);
        bool shutdown_on_exception_,
        UInt64 global_profiler_real_time_period_ns_,
        UInt64 global_profiler_cpu_time_period_ns_);

public:
    static void initialize(size_t max_threads = 10000, size_t max_free_threads = 1000, size_t queue_size = 10000);
    UInt64 global_profiler_real_time_period_ns;
    UInt64 global_profiler_cpu_time_period_ns;

    static void initialize(
        size_t max_threads = 10000,
        size_t max_free_threads = 1000,
        size_t queue_size = 10000,
        UInt64 global_profiler_real_time_period_ns_ = 0,
        UInt64 global_profiler_cpu_time_period_ns_ = 0);

    static GlobalThreadPool & instance();
    static void shutdown();
};

@ -187,7 +198,7 @@ public:
 * NOTE: User code should use 'ThreadFromGlobalPool' declared below instead of directly using this class.
 *
 */
template <bool propagate_opentelemetry_context = true>
template <bool propagate_opentelemetry_context = true, bool global_trace_collector_allowed = true>
class ThreadFromGlobalPoolImpl : boost::noncopyable
{
public:

@ -197,11 +208,15 @@ public:
    explicit ThreadFromGlobalPoolImpl(Function && func, Args &&... args)
        : state(std::make_shared<State>())
    {
        UInt64 global_profiler_real_time_period = GlobalThreadPool::instance().global_profiler_real_time_period_ns;
        UInt64 global_profiler_cpu_time_period = GlobalThreadPool::instance().global_profiler_cpu_time_period_ns;
        /// NOTE:
        /// - If this will throw an exception, the destructor won't be called
        /// - this pointer cannot be passed in the lambda, since after detach() it will not be valid
        GlobalThreadPool::instance().scheduleOrThrow([
            my_state = state,
            global_profiler_real_time_period,
            global_profiler_cpu_time_period,
            my_func = std::forward<Function>(func),
            my_args = std::make_tuple(std::forward<Args>(args)...)]() mutable /// mutable is needed to destroy capture
        {

@ -220,6 +235,12 @@ public:
            /// Thread status holds raw pointer on query context, thus it always must be destroyed
            /// before sending signal that permits to join this thread.
            DB::ThreadStatus thread_status;
            if constexpr (global_trace_collector_allowed)
            {
                if (unlikely(global_profiler_real_time_period != 0 || global_profiler_cpu_time_period != 0))
                    thread_status.initGlobalProfiler(global_profiler_real_time_period, global_profiler_cpu_time_period);
            }

            std::apply(function, arguments);
        },
        {}, // default priority

@ -305,11 +326,12 @@ protected:
|
||||
/// you need to use class, or you need to use ThreadFromGlobalPool below.
|
||||
///
|
||||
/// See the comments of ThreadPool below to know how it works.
|
||||
using ThreadFromGlobalPoolNoTracingContextPropagation = ThreadFromGlobalPoolImpl<false>;
|
||||
using ThreadFromGlobalPoolNoTracingContextPropagation = ThreadFromGlobalPoolImpl<false, true>;
|
||||
|
||||
/// An alias of thread that execute jobs/tasks on global thread pool by implicit passing tracing context on current thread to underlying worker as parent tracing context.
|
||||
/// If jobs/tasks are directly scheduled by using APIs of this class, you need to use this class or you need to use class above.
|
||||
using ThreadFromGlobalPool = ThreadFromGlobalPoolImpl<true>;
|
||||
using ThreadFromGlobalPool = ThreadFromGlobalPoolImpl<true, true>;
|
||||
using ThreadFromGlobalPoolWithoutTraceCollector = ThreadFromGlobalPoolImpl<true, false>;
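
The new second template parameter lets a call site opt out of attaching the global profilers, which matters for threads that must not install timers (for example, ones created before the trace collector infrastructure is ready). A hedged usage sketch; the variable name and body are illustrative:

// Illustrative only: a background thread that opts out of the global
// trace collector via the new alias.
ThreadFromGlobalPoolWithoutTraceCollector background([]
{
    // ... work that must not run under the global profilers ...
});
background.join();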

/// Recommended thread pool for the case when multiple thread pools are created and destroyed.
///

@ -3,11 +3,12 @@
template <typename Thread>
class ThreadPoolImpl;

template <bool propagate_opentelemetry_context>
template <bool propagate_opentelemetry_context, bool global_trace_collector_allowed>
class ThreadFromGlobalPoolImpl;

using ThreadFromGlobalPoolNoTracingContextPropagation = ThreadFromGlobalPoolImpl<false>;
using ThreadFromGlobalPoolNoTracingContextPropagation = ThreadFromGlobalPoolImpl<false, true>;

using ThreadFromGlobalPool = ThreadFromGlobalPoolImpl<true>;
using ThreadFromGlobalPool = ThreadFromGlobalPoolImpl<true, true>;
using ThreadFromGlobalPoolWithoutTraceCollector = ThreadFromGlobalPoolImpl<true, false>;

using ThreadPool = ThreadPoolImpl<ThreadFromGlobalPoolNoTracingContextPropagation>;

@ -96,7 +96,7 @@ ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
    stack_t altstack_description{};
    altstack_description.ss_sp = alt_stack.getData();
    altstack_description.ss_flags = 0;
    altstack_description.ss_size = alt_stack.getSize();
    altstack_description.ss_size = ThreadStack::getSize();

    if (0 != sigaltstack(&altstack_description, nullptr))
    {
@ -124,6 +124,26 @@ ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
#endif
}

void ThreadStatus::initGlobalProfiler([[maybe_unused]] UInt64 global_profiler_real_time_period, [[maybe_unused]] UInt64 global_profiler_cpu_time_period)
{
#if !defined(SANITIZER) && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD) && !defined(__APPLE__)
    try
    {
        if (global_profiler_real_time_period > 0)
            query_profiler_real = std::make_unique<QueryProfilerReal>(thread_id,
                /* period= */ static_cast<UInt32>(global_profiler_real_time_period));

        if (global_profiler_cpu_time_period > 0)
            query_profiler_cpu = std::make_unique<QueryProfilerCPU>(thread_id,
                /* period= */ static_cast<UInt32>(global_profiler_cpu_time_period));
    }
    catch (...)
    {
        tryLogCurrentException("ThreadStatus", "Cannot initialize GlobalProfiler");
    }
#endif
}
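
Pool threads get this call from the ThreadFromGlobalPoolImpl constructor shown above; for a thread managed outside the pool, the same attachment could in principle be done by hand. A minimal sketch, assuming the period values are obtained elsewhere:

// Sketch (assumed call site, not actual server code): attach the global
// profilers to the current thread, mirroring what the pool does.
DB::ThreadStatus thread_status;
if (real_period_ns != 0 || cpu_period_ns != 0)  // both variables are assumptions
    thread_status.initGlobalProfiler(real_period_ns, cpu_period_ns);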

ThreadGroupPtr ThreadStatus::getThreadGroup() const
{
    chassert(current_thread == this);

@ -307,6 +307,8 @@ public:

    void flushUntrackedMemory();

    void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period);

private:
    void applyGlobalSettings();
    void applyQuerySettings();

39
src/Common/randomDelay.cpp
Normal file
@ -0,0 +1,39 @@
#include <Common/randomDelay.h>

#include <Common/logger_useful.h>
#include <Common/randomNumber.h>
#include <base/sleep.h>


void randomDelayForMaxMilliseconds(uint64_t milliseconds, LoggerPtr log, const char * start_of_message)
{
    if (milliseconds)
    {
        auto count = randomNumber() % milliseconds;

        if (log)
        {
            if (start_of_message && !*start_of_message)
                start_of_message = nullptr;

            LOG_TEST(log, "{}{}Sleeping for {} milliseconds",
                (start_of_message ? start_of_message : ""),
                (start_of_message ? ": " : ""),
                count);
        }

        sleepForMilliseconds(count);

        if (log)
        {
            LOG_TEST(log, "{}{}Awaking after sleeping",
                (start_of_message ? start_of_message : ""),
                (start_of_message ? ": " : ""));
        }
    }
}

void randomDelayForMaxSeconds(uint64_t seconds, LoggerPtr log, const char * start_of_message)
{
    randomDelayForMaxMilliseconds(seconds * 1000, log, start_of_message);
}

8
src/Common/randomDelay.h
Normal file
@ -0,0 +1,8 @@
#pragma once

#include <Common/Logger.h>

/// Sleeps for a random duration between 0 and the specified number of milliseconds; optionally logs a message about it.
/// These functions can be used to add random delays in tests.
void randomDelayForMaxMilliseconds(uint64_t milliseconds, LoggerPtr log = nullptr, const char * start_of_message = nullptr);
void randomDelayForMaxSeconds(uint64_t seconds, LoggerPtr log = nullptr, const char * start_of_message = nullptr);
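
A hedged usage sketch for the new helpers; the test helper name, logger name, and message prefix are made up for illustration:

#include <Common/randomDelay.h>
#include <Common/Logger.h>

void injectReplicaLag()  // hypothetical test helper
{
    // Sleep between 0 and 5000 ms; the chosen delay is logged at TEST level
    // with the given message prefix.
    randomDelayForMaxSeconds(5, getLogger("ReplicaLagTest"), "Injected lag");
}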
@ -38,7 +38,7 @@ void CachedCompressedReadBuffer::prefetch(Priority priority)
bool CachedCompressedReadBuffer::nextImpl()
{
    /// Check for the presence of a decompressed block in the cache and grab ownership of this block, if it exists.
    UInt128 key = cache->hash(path, file_pos);
    UInt128 key = UncompressedCache::hash(path, file_pos);

    owned_cell = cache->getOrSet(key, [&]()
    {

@ -592,7 +592,7 @@ String RecalculateCommand::run()

String CleanResourcesCommand::run()
{
    keeper_dispatcher.cleanResources();
    KeeperDispatcher::cleanResources();
    return "ok";
}
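
This hunk and the sigaltstack one above apply the same fix: a static member is now called through the type instead of through an instance, which is what clang-tidy's readability-static-accessed-through-instance check asks for. A reduced sketch of the pattern, using made-up names:

struct DispatcherLike
{
    static void cleanResources() { /* touches no instance state */ }
};

void run(DispatcherLike & dispatcher)
{
    dispatcher.cleanResources();      // compiles, but hides that the call is static
    DispatcherLike::cleanResources(); // preferred: the call site shows it is static
}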

@ -1,19 +1,17 @@
#include <Common/Exception.h>
#include <Common/FieldVisitorToString.h>

#include <Core/Block.h>

#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

#include <Common/assert_cast.h>

#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnSparse.h>
#include <Core/Block.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <base/sort.h>
#include <Common/Exception.h>
#include <Common/FieldVisitorToString.h>
#include <Common/assert_cast.h>

#include <iterator>
#include <base/sort.h>

#include <boost/algorithm/string.hpp>


@ -22,6 +22,12 @@ namespace ErrorCodes
    extern const int DECIMAL_OVERFLOW;
}

template <is_decimal T>
T DecimalField<T>::getScaleMultiplier() const
{
    return DecimalUtils::scaleMultiplier<T>(scale);
}

inline Field getBinaryValue(UInt8 type, ReadBuffer & buf)
{
    switch (static_cast<Field::Types::Which>(type))
@ -627,5 +633,9 @@ std::string_view Field::getTypeName() const
    return fieldTypeToString(which);
}


template class DecimalField<Decimal32>;
template class DecimalField<Decimal64>;
template class DecimalField<Decimal128>;
template class DecimalField<Decimal256>;
template class DecimalField<DateTime64>;
}
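
Moving getScaleMultiplier out of line and explicitly instantiating DecimalField here pairs with the extern template declarations added to Field.h below: every other translation unit reuses the five instantiations emitted in this file instead of re-instantiating the template. A reduced sketch of the idiom, with made-up names:

// wrapper.h -- declare, and promise the instantiation lives elsewhere.
template <typename T>
struct Wrapper
{
    T twice() const;                   // defined out of line
    T value;
};
extern template struct Wrapper<int>;   // suppresses implicit instantiation

// wrapper.cpp -- the one translation unit that emits the definition.
template <typename T>
T Wrapper<T>::twice() const { return value + value; }
template struct Wrapper<int>;          // the single explicit instantiation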

@ -8,7 +8,6 @@
#include <functional>

#include <Core/CompareHelper.h>
#include <Core/DecimalFunctions.h>
#include <Core/Defines.h>
#include <Core/Types.h>
#include <Core/UUID.h>
@ -151,7 +150,7 @@ public:

    operator T() const { return dec; } /// NOLINT
    T getValue() const { return dec; }
    T getScaleMultiplier() const { return DecimalUtils::scaleMultiplier<T>(scale); }
    T getScaleMultiplier() const;
    UInt32 getScale() const { return scale; }

    template <typename U>
@ -200,6 +199,12 @@ private:
    UInt32 scale;
};

extern template class DecimalField<Decimal32>;
extern template class DecimalField<Decimal64>;
extern template class DecimalField<Decimal128>;
extern template class DecimalField<Decimal256>;
extern template class DecimalField<DateTime64>;

template <typename T> constexpr bool is_decimal_field = false;
template <> constexpr inline bool is_decimal_field<DecimalField<Decimal32>> = true;
template <> constexpr inline bool is_decimal_field<DecimalField<Decimal64>> = true;

@ -137,6 +137,8 @@ namespace DB
    M(UInt64, http_connections_soft_limit, 100, "Connections above this limit have significantly shorter time to live. The limit applies to the http connections which do not belong to any disk or storage.", 0) \
    M(UInt64, http_connections_warn_limit, 1000, "Warning messages are written to the logs if the number of in-use connections is higher than this limit. The limit applies to the http connections which do not belong to any disk or storage.", 0) \
    M(UInt64, http_connections_store_limit, 5000, "Connections above this limit reset after use. Set to 0 to turn the connection cache off. The limit applies to the http connections which do not belong to any disk or storage.", 0) \
    M(UInt64, global_profiler_real_time_period_ns, 0, "Period for the real clock timer of the global profiler (in nanoseconds). Set to 0 to turn off the real clock global profiler. The recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
    M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for the CPU clock timer of the global profiler (in nanoseconds). Set to 0 to turn off the CPU clock global profiler. The recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \

/// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp

@ -2,6 +2,8 @@
#include <magic_enum.hpp>
#include <Access/Common/SQLSecurityDefs.h>

#include <boost/range/adaptor/map.hpp>


namespace DB
{

@ -1,13 +1,12 @@
#pragma once

#include <Poco/Timespan.h>
#include <Poco/URI.h>
#include <base/types.h>
#include <Core/Field.h>
#include <Core/MultiEnum.h>
#include <boost/range/adaptor/map.hpp>
#include <chrono>
#include <string_view>
#include <Core/Field.h>
#include <Core/MultiEnum.h>
#include <base/types.h>
#include <Poco/Timespan.h>
#include <Poco/URI.h>


namespace DB

@ -15,6 +15,7 @@
#include <IO/Operators.h>

#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier_fwd.h>
#include <Parsers/ASTLiteral.h>
@ -32,6 +33,11 @@ namespace ErrorCodes
    extern const int LOGICAL_ERROR;
}

String DataTypeAggregateFunction::getFunctionName() const
{
    return function->getName();
}


String DataTypeAggregateFunction::doGetName() const
{
@ -52,6 +58,25 @@ size_t DataTypeAggregateFunction::getVersion() const
    return function->getDefaultVersion();
}

DataTypePtr DataTypeAggregateFunction::getReturnType() const
{
    return function->getResultType();
}

DataTypePtr DataTypeAggregateFunction::getReturnTypeToPredict() const
{
    return function->getReturnTypeToPredict();
}

bool DataTypeAggregateFunction::isVersioned() const
{
    return function->isVersioned();
}

void DataTypeAggregateFunction::updateVersionFromRevision(size_t revision, bool if_empty) const
{
    setVersion(function->getVersionFromRevision(revision), if_empty);
}

String DataTypeAggregateFunction::getNameImpl(bool with_version) const
{

@ -1,7 +1,7 @@
#pragma once

#include <AggregateFunctions/IAggregateFunction.h>

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Core/Field.h>
#include <DataTypes/IDataType.h>


@ -39,7 +39,7 @@ public:
    {
    }

    String getFunctionName() const { return function->getName(); }
    String getFunctionName() const;
    AggregateFunctionPtr getFunction() const { return function; }

    String doGetName() const override;
@ -51,8 +51,8 @@ public:

    bool canBeInsideNullable() const override { return false; }

    DataTypePtr getReturnType() const { return function->getResultType(); }
    DataTypePtr getReturnTypeToPredict() const { return function->getReturnTypeToPredict(); }
    DataTypePtr getReturnType() const;
    DataTypePtr getReturnTypeToPredict() const;
    DataTypes getArgumentsDataTypes() const { return argument_types; }

    MutableColumnPtr createColumn() const override;
@ -69,7 +69,7 @@ public:
    SerializationPtr doGetDefaultSerialization() const override;
    bool supportsSparseSerialization() const override { return false; }

    bool isVersioned() const { return function->isVersioned(); }
    bool isVersioned() const;

    /// Version is not empty only if it was parsed from AST or implicitly cast to 0 or version according
    /// to server revision.
@ -84,10 +84,7 @@ public:
        version = version_;
    }

    void updateVersionFromRevision(size_t revision, bool if_empty) const
    {
        setVersion(function->getVersionFromRevision(revision), if_empty);
    }
    void updateVersionFromRevision(size_t revision, bool if_empty) const;
};
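
The header-side half of the same cleanup: the one-line bodies that dereferenced `function` moved to the .cpp, so the header can depend on IAggregateFunction_fwd.h alone. A reduced sketch of the forward-declaration idiom, with made-up names:

#include <memory>
#include <string>

// Header side: a forward declaration is enough to hold a shared_ptr and to
// *declare* member functions; no full definition is needed here.
class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;

struct DataTypeLike
{
    std::string getFunctionName() const;  // body lives in the .cpp, which
                                          // includes the full header
    AggregateFunctionPtr function;
};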

void setVersionToAggregateFunctions(DataTypePtr & type, bool if_empty, std::optional<size_t> revision = std::nullopt);

@ -7,6 +7,7 @@
#include <DataTypes/DataTypeFactory.h>

#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>

@ -1,13 +1,18 @@
#pragma once

#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Core/Field.h>
#include <DataTypes/DataTypeCustom.h>
#include <AggregateFunctions/IAggregateFunction.h>

#include <IO/ReadHelpers.h>

namespace DB
{

class IDataType;
using DataTypePtr = std::shared_ptr<const IDataType>;
using DataTypes = std::vector<DataTypePtr>;

/** The type SimpleAggregateFunction(fct, type) is meant to be used in an AggregatingMergeTree. It behaves like a standard
  * data type, but when rows are merged, an aggregation function is applied.
  *
Some files were not shown because too many files have changed in this diff