Merge branch 'master' of github.com:ClickHouse/ClickHouse into analyzer-fixes-bugs

commit 48b1c94ddd
Nikita Mikhaylov, 2024-04-08 16:23:17 +00:00
474 changed files with 4039 additions and 1543 deletions

View File

@@ -96,7 +96,6 @@ Checks: [
 '-modernize-use-default-member-init',
 '-modernize-use-emplace',
 '-modernize-use-nodiscard',
-'-modernize-use-override',
 '-modernize-use-trailing-return-type',
 '-performance-inefficient-string-concatenation',

View File

@@ -123,7 +123,6 @@
 * Something was wrong with Apache Hive, which is experimental and not supported. [#60262](https://github.com/ClickHouse/ClickHouse/pull/60262) ([shanfengp](https://github.com/Aed-p)).
 * An improvement for experimental parallel replicas: force reanalysis if parallel replicas changed [#60362](https://github.com/ClickHouse/ClickHouse/pull/60362) ([Raúl Marín](https://github.com/Algunenano)).
 * Fix usage of plain metadata type with new disks configuration option [#60396](https://github.com/ClickHouse/ClickHouse/pull/60396) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Don't allow to set max_parallel_replicas to 0 as it doesn't make sense [#60430](https://github.com/ClickHouse/ClickHouse/pull/60430) ([Kruglov Pavel](https://github.com/Avogar)).
 * Try to fix logical error 'Cannot capture column because it has incompatible type' in mapContainsKeyLike [#60451](https://github.com/ClickHouse/ClickHouse/pull/60451) ([Kruglov Pavel](https://github.com/Avogar)).
 * Avoid calculation of scalar subqueries for CREATE TABLE. [#60464](https://github.com/ClickHouse/ClickHouse/pull/60464) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix deadlock in parallel parsing when lots of rows are skipped due to errors [#60516](https://github.com/ClickHouse/ClickHouse/pull/60516) ([Kruglov Pavel](https://github.com/Avogar)).

View File

@@ -13,8 +13,6 @@
 #include <tuple>
 #include <limits>
-#include <boost/math/special_functions/fpclassify.hpp>
 // NOLINTBEGIN(*)
 /// Use same extended double for all platforms
@@ -22,6 +20,7 @@
 #define CONSTEXPR_FROM_DOUBLE constexpr
 using FromDoubleIntermediateType = long double;
 #else
+#include <boost/math/special_functions/fpclassify.hpp>
 #include <boost/multiprecision/cpp_bin_float.hpp>
 /// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
 #define CONSTEXPR_FROM_DOUBLE
@@ -309,6 +308,13 @@ struct integer<Bits, Signed>::_impl
 constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
 static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
 /// Implementation specific behaviour on overflow (if we don't check here, stack overflow will triggered in bigint_cast).
+#if (LDBL_MANT_DIG == 64)
+if (!std::isfinite(t))
+{
+    self = 0;
+    return;
+}
+#else
 if constexpr (std::is_same_v<T, double>)
 {
     if (!std::isfinite(t))
@@ -325,6 +331,7 @@ struct integer<Bits, Signed>::_impl
     return;
 }
 }
+#endif
 const T alpha = t / static_cast<T>(max_int);

View File

@@ -314,13 +314,13 @@ static int read_unicode(json_stream *json)
 if (l < 0xdc00 || l > 0xdfff) {
 json_error(json, "invalid surrogate pair continuation \\u%04lx out "
-"of range (dc00-dfff)", l);
+"of range (dc00-dfff)", (unsigned long)l);
 return -1;
 }
 cp = ((h - 0xd800) * 0x400) + ((l - 0xdc00) + 0x10000);
 } else if (cp >= 0xdc00 && cp <= 0xdfff) {
-json_error(json, "dangling surrogate \\u%04lx", cp);
+json_error(json, "dangling surrogate \\u%04lx", (unsigned long)cp);
 return -1;
 }

contrib/NuRaft vendored

@@ -1 +1 @@
-Subproject commit 4a12f99dfc9d47c687ff7700b927cc76856225d1
+Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0

View File

@@ -32,6 +32,7 @@ set(SRCS
 "${LIBRARY_DIR}/src/handle_custom_notification.cxx"
 "${LIBRARY_DIR}/src/handle_vote.cxx"
 "${LIBRARY_DIR}/src/launcher.cxx"
+"${LIBRARY_DIR}/src/log_entry.cxx"
 "${LIBRARY_DIR}/src/srv_config.cxx"
 "${LIBRARY_DIR}/src/snapshot_sync_req.cxx"
 "${LIBRARY_DIR}/src/snapshot_sync_ctx.cxx"
@@ -50,6 +51,12 @@ else()
 target_compile_definitions(_nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1)
 endif()
+target_link_libraries (_nuraft PRIVATE clickhouse_common_io)
+# We must have it PUBLIC here because some headers which depend on it directly
+# included in clickhouse
+target_compile_definitions(_nuraft PUBLIC USE_CLICKHOUSE_THREADS=1)
+MESSAGE(STATUS "Will use clickhouse threads for NuRaft")
 target_include_directories (_nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft")
 # for some reason include "asio.h" directly without "boost/" prefix.
 target_include_directories (_nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")

View File

@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""

View File

@@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.3.1.2672"
+ARG VERSION="24.3.2.23"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 # set non-empty deb_location_url url to create a docker image

View File

@@ -16,6 +16,8 @@ ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
 dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
+dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
+dpkg -i package_folder/clickhouse-library-bridge_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
 dpkg -i package_folder/clickhouse-client_*.deb

View File

@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.2.23-lts (8b7d910960c) FIXME as compared to v24.3.1.2672-lts (2c5c589a882)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix logical error in group_by_use_nulls + grouping set + analyzer + materialize/constant [#61567](https://github.com/ClickHouse/ClickHouse/pull/61567) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix external table cannot parse data type Bool [#62115](https://github.com/ClickHouse/ClickHouse/pull/62115) ([Duc Canh Le](https://github.com/canhld94)).
* Revert "Merge pull request [#61564](https://github.com/ClickHouse/ClickHouse/issues/61564) from liuneng1994/optimize_in_single_value" [#62135](https://github.com/ClickHouse/ClickHouse/pull/62135) ([Raúl Marín](https://github.com/Algunenano)).
#### CI Fix or Improvement (changelog entry is not required)
* Backported in [#62030](https://github.com/ClickHouse/ClickHouse/issues/62030):. [#61869](https://github.com/ClickHouse/ClickHouse/pull/61869) ([Nikita Fomichev](https://github.com/fm4v)).
* Backported in [#62057](https://github.com/ClickHouse/ClickHouse/issues/62057): ... [#62044](https://github.com/ClickHouse/ClickHouse/pull/62044) ([Max K.](https://github.com/maxknv)).
* Backported in [#62204](https://github.com/ClickHouse/ClickHouse/issues/62204):. [#62190](https://github.com/ClickHouse/ClickHouse/pull/62190) ([Konstantin Bogdanov](https://github.com/thevar1able)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix some crashes with analyzer and group_by_use_nulls. [#61933](https://github.com/ClickHouse/ClickHouse/pull/61933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix scalars create as select [#61998](https://github.com/ClickHouse/ClickHouse/pull/61998) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Ignore IfChainToMultiIfPass if returned type changed. [#62059](https://github.com/ClickHouse/ClickHouse/pull/62059) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix type for ConvertInToEqualPass [#62066](https://github.com/ClickHouse/ClickHouse/pull/62066) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Revert output Pretty in tty [#62090](https://github.com/ClickHouse/ClickHouse/pull/62090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

View File

@@ -18,6 +18,9 @@ Run the command:
 ```bash
 wget https://s3.amazonaws.com/menusdata.nypl.org/gzips/2021_08_01_07_01_17_data.tgz
+# Option: Validate the checksum
+md5sum 2021_08_01_07_01_17_data.tgz
+# Checksum should be equal to: db6126724de939a5481e3160a2d67d15
 ```
 Replace the link to the up to date link from http://menus.nypl.org/data if needed.

View File

@@ -79,7 +79,7 @@ The supported formats are:
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
 | [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | |
 | [Native](#native) | ✔ | ✔ |
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
@@ -1487,7 +1487,7 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000
 - [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
 - [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
 - [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
-- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
+- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`.
 ## RowBinary {#rowbinary}
@@ -2465,7 +2465,7 @@ Result:
 ## Npy {#data-format-npy}
 This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats top level dimension as an array of rows with single column. Supported Npy data types and their corresponding type in ClickHouse:
 | Npy type | ClickHouse type |
 |:--------:|:---------------:|
 | b1 | UInt8 |

View File

@@ -507,16 +507,18 @@ Example:
 ``` xml
 <http_handlers>
     <rule>
-        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
         <methods>GET</methods>
         <headers>
             <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
         </headers>
         <handler>
             <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
         </handler>
     </rule>
     <defaults/>
@@ -524,13 +526,13 @@ Example:
 ```
 ``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
 ```
 :::note
-In one `predefined_query_handler` only supports one `query` of an insert type.
+In one `predefined_query_handler` only one `query` is supported.
 :::
 ### dynamic_query_handler {#dynamic_query_handler}

View File

@@ -436,7 +436,7 @@ Default: 0
 Restriction on dropping partitions.
 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can't drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
 Default value: 50 GB.
 The value 0 means that you can drop partitions without any restrictions.
@@ -518,7 +518,7 @@ Restriction on deleting tables.
 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can't delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
 Default value: 50 GB.
 The value 0 means that you can delete all tables without any restrictions.
@@ -1570,7 +1570,7 @@ Restriction on deleting tables.
 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can't delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
 Default value: 50 GB.
@@ -1588,7 +1588,7 @@ Restriction on dropping partitions.
 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can't drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
 Default value: 50 GB.
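To see the effective limits described above on a running server, they can be read back at runtime; this is an illustrative sketch that assumes the `system.server_settings` table available in recent ClickHouse releases:
```sql
-- Read the current drop-size limits back from the server (names match the settings documented above)
SELECT name, value
FROM system.server_settings
WHERE name IN ('max_table_size_to_drop', 'max_partition_size_to_drop');
```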

View File

@@ -1642,7 +1642,7 @@ Possible values:
 - 0 — Output without row numbers.
 - 1 — Output with row numbers.
-Default value: `0`.
+Default value: `1`.
 **Example**
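For illustration, a minimal query pair showing the effect of the new default and how to switch it back; the sample query is an assumption, not taken from the page above:
```sql
-- With the new default (output_format_pretty_row_numbers = 1) Pretty output prefixes each row with its number
SELECT number FROM numbers(3) FORMAT Pretty;
-- To restore the previous behaviour, disable it explicitly for the query
SELECT number FROM numbers(3) FORMAT Pretty SETTINGS output_format_pretty_row_numbers = 0;
```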

View File

@@ -36,7 +36,7 @@ E.g. configuration option
 <s3>
 <type>s3</type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3>
 ```
@@ -47,7 +47,7 @@ is equal to configuration (from `24.1`):
 <object_storage_type>s3</object_storage_type>
 <metadata_type>local</metadata_type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3>
 ```
@@ -56,7 +56,7 @@ Configuration
 <s3_plain>
 <type>s3_plain</type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
@@ -67,7 +67,7 @@ is equal to
 <object_storage_type>s3</object_storage_type>
 <metadata_type>plain</metadata_type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
@@ -79,7 +79,7 @@ Example of full storage configuration will look like:
 <s3>
 <type>s3</type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3>
 </disks>
 <policies>
@@ -105,7 +105,7 @@ Starting with 24.1 clickhouse version, it can also look like:
 <object_storage_type>s3</object_storage_type>
 <metadata_type>local</metadata_type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3>
 </disks>
 <policies>
@@ -324,7 +324,7 @@ Configuration:
 <s3_plain>
 <type>s3_plain</type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
@@ -337,7 +337,7 @@ Configuration:
 <object_storage_type>azure</object_storage_type>
 <metadata_type>plain</metadata_type>
 <endpoint>https://s3.eu-west-1.amazonaws.com/clickhouse-eu-west-1.clickhouse.com/data/</endpoint>
-<use_invironment_credentials>1</use_invironment_credentials>
+<use_environment_credentials>1</use_environment_credentials>
 </s3_plain>
 ```
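As a usage illustration of the disk configurations being corrected above, a table can target a storage policy built on such a disk; the policy name `s3_main` and table definition here are assumptions for the sketch and must match a policy declared in the server configuration:
```sql
-- Hypothetical policy name 's3_main'; the disk and policy must exist in the server config first
CREATE TABLE s3_backed_table
(
    key UInt64,
    value String
)
ENGINE = MergeTree
ORDER BY key
SETTINGS storage_policy = 's3_main';
```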

View File

@@ -483,7 +483,7 @@ Where:
 - `r1`- the number of unique visitors who visited the site during 2020-01-01 (the `cond1` condition).
 - `r2`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-02 (`cond1` and `cond2` conditions).
-- `r3`- the number of unique visitors who visited the site during a specific time period between 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
+- `r3`- the number of unique visitors who visited the site during a specific time period on 2020-01-01 and 2020-01-03 (`cond1` and `cond3` conditions).
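A condensed sketch of the query shape these counters come from; the table and column names are assumed for illustration only:
```sql
-- Sketch of the retention() query behind the r1/r2/r3 counters (table and columns are assumed)
SELECT
    sum(r[1]) AS r1,
    sum(r[2]) AS r2,
    sum(r[3]) AS r3
FROM
(
    SELECT
        uid,
        retention(date = '2020-01-01', date = '2020-01-02', date = '2020-01-03') AS r
    FROM visits
    GROUP BY uid
);
```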
 ## uniqUpTo(N)(x)

View File

@@ -7,26 +7,33 @@ sidebar_position: 351
 [Cramer's V](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V) (sometimes referred to as Cramer's phi) is a measure of association between two columns in a table. The result of the `cramersV` function ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. It may be viewed as the association between two variables as a percentage of their maximum possible variation.
+:::note
+For a bias corrected version of Cramer's V see: [cramersVBiasCorrected](./cramersvbiascorrected.md)
+:::
 **Syntax**
 ``` sql
 cramersV(column1, column2)
 ```
-**Arguments**
+**Parameters**
-- `column1` and `column2` are the columns to be compared
+- `column1`: first column to be compared.
+- `column2`: second column to be compared.
 **Returned value**
 - a value between 0 (corresponding to no association between the columns' values) to 1 (complete association).
-**Return type** is always [Float64](../../../sql-reference/data-types/float.md).
+Type: always [Float64](../../../sql-reference/data-types/float.md).
 **Example**
 The following two columns being compared below have no association with each other, so the result of `cramersV` is 0:
+Query:
 ``` sql
 SELECT
     cramersV(a, b)

View File

@@ -5,31 +5,31 @@ sidebar_position: 352
 # cramersVBiasCorrected
 Cramer's V is a measure of association between two columns in a table. The result of the [`cramersV` function](./cramersv.md) ranges from 0 (corresponding to no association between the variables) to 1 and can reach 1 only when each value is completely determined by the other. The function can be heavily biased, so this version of Cramer's V uses the [bias correction](https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V#Bias_correction).
 **Syntax**
 ``` sql
 cramersVBiasCorrected(column1, column2)
 ```
-**Arguments**
+**Parameters**
-- `column1` and `column2` are the columns to be compared
+- `column1`: first column to be compared.
+- `column2`: second column to be compared.
 **Returned value**
 - a value between 0 (corresponding to no association between the columns' values) to 1 (complete association).
-**Return type** is always [Float64](../../../sql-reference/data-types/float.md).
+Type: always [Float64](../../../sql-reference/data-types/float.md).
 **Example**
 The following two columns being compared below have a small association with each other. Notice the result of `cramersVBiasCorrected` is smaller than the result of `cramersV`:
+Query:
 ``` sql
 SELECT
     cramersV(a, b),

View File

@@ -36,9 +36,9 @@ You can explicitly set a time zone for `DateTime`-type columns when creating a t
 The [clickhouse-client](../../interfaces/cli.md) applies the server time zone by default if a time zone isn't explicitly set when initializing the data type. To use the client time zone, run `clickhouse-client` with the `--use_client_time_zone` parameter.
-ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings.md#settings-date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.
+ClickHouse outputs values depending on the value of the [date_time_output_format](../../operations/settings/settings-formats.md#date_time_output_format) setting. `YYYY-MM-DD hh:mm:ss` text format by default. Additionally, you can change the output with the [formatDateTime](../../sql-reference/functions/date-time-functions.md#formatdatetime) function.
-When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings.md#settings-date_time_input_format) setting.
+When inserting data into ClickHouse, you can use different formats of date and time strings, depending on the value of the [date_time_input_format](../../operations/settings/settings-formats.md#date_time_input_format) setting.
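A small illustration of the `date_time_output_format` setting whose documentation link is being corrected here; the chosen value `iso` is just an example:
```sql
-- Render DateTime values in ISO 8601 form instead of the default 'YYYY-MM-DD hh:mm:ss'
SELECT toDateTime('2024-04-08 16:23:17', 'UTC') AS dt
SETTINGS date_time_output_format = 'iso';
-- returns 2024-04-08T16:23:17Z instead of the default 2024-04-08 16:23:17
```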
 ## Examples
@@ -147,8 +147,8 @@ Time shifts for multiple days. Some pacific islands changed their timezone offse
 - [Type conversion functions](../../sql-reference/functions/type-conversion-functions.md)
 - [Functions for working with dates and times](../../sql-reference/functions/date-time-functions.md)
 - [Functions for working with arrays](../../sql-reference/functions/array-functions.md)
-- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#settings-date_time_input_format)
+- [The `date_time_input_format` setting](../../operations/settings/settings-formats.md#date_time_input_format)
-- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#settings-date_time_output_format)
+- [The `date_time_output_format` setting](../../operations/settings/settings-formats.md#date_time_output_format)
 - [The `timezone` server configuration parameter](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
 - [The `session_timezone` setting](../../operations/settings/settings.md#session_timezone)
 - [Operators for working with dates and times](../../sql-reference/operators/index.md#operators-datetime)

View File

@@ -190,22 +190,67 @@ SELECT toTypeName(variantType(v)) FROM test LIMIT 1;
 └─────────────────────────────────────────────────────────────────────┘
 ```
-## Conversion between Variant column and other columns
+## Conversion between a Variant column and other columns
-There are 3 possible conversions that can be performed with Variant column.
+There are 4 possible conversions that can be performed with a column of type `Variant`.
-### Converting an ordinary column to a Variant column
+### Converting a String column to a Variant column
-It is possible to convert ordinary column with type `T` to a `Variant` column containing this type:
+Conversion from `String` to `Variant` is performed by parsing a value of `Variant` type from the string value:
 ```sql
-SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
+SELECT '42'::Variant(String, UInt64) as variant, variantType(variant) as variant_type
 ```
 ```text
-┌─type_name──────────────────────────────┬─variant───────┐
-│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
-└────────────────────────────────────────┴───────────────┘
+┌─variant─┬─variant_type─┐
+│ 42      │ UInt64       │
+└─────────┴──────────────┘
+```
+```sql
+SELECT '[1, 2, 3]'::Variant(String, Array(UInt64)) as variant, variantType(variant) as variant_type
+```
+```text
+┌─variant─┬─variant_type──┐
+│ [1,2,3] │ Array(UInt64) │
+└─────────┴───────────────┘
+```
+```sql
+SELECT CAST(map('key1', '42', 'key2', 'true', 'key3', '2020-01-01'), 'Map(String, Variant(UInt64, Bool, Date))') as map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) as map_of_variant_types
+```
+```text
+┌─map_of_variants─────────────────────────────┬─map_of_variant_types──────────────────────────┐
+│ {'key1':42,'key2':true,'key3':'2020-01-01'} │ {'key1':'UInt64','key2':'Bool','key3':'Date'} │
+└─────────────────────────────────────────────┴───────────────────────────────────────────────┘
+```
+### Converting an ordinary column to a Variant column
+It is possible to convert an ordinary column with type `T` to a `Variant` column containing this type:
+```sql
+SELECT toTypeName(variant) as type_name, [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) as variant, variantType(variant) as variant_name
+```
+```text
+┌─type_name──────────────────────────────┬─variant─┬─variant_name──┐
+│ Variant(Array(UInt64), String, UInt64) │ [1,2,3] │ Array(UInt64) │
+└────────────────────────────────────────┴─────────┴───────────────┘
+```
+Note: converting from `String` type is always performed through parsing, if you need to convert `String` column to `String` variant of a `Variant` without parsing, you can do the following:
+```sql
+SELECT '[1, 2, 3]'::Variant(String)::Variant(String, Array(UInt64), UInt64) as variant, variantType(variant) as variant_type
+```
+```sql
+┌─variant───┬─variant_type─┐
+│ [1, 2, 3] │ String       │
+└───────────┴──────────────┘
 ```
 ### Converting a Variant column to an ordinary column
@@ -395,3 +440,37 @@ SELECT v, variantType(v) FROM test ORDER by v;
 │ 100 │ UInt32 │
 └─────┴────────────────┘
 ```
+## JSONExtract functions with Variant
+All `JSONExtract*` functions support `Variant` type:
+```sql
+SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Variant(UInt32, String, Array(UInt32))') AS variant, variantType(variant) AS variant_type;
+```
+```text
+┌─variant─┬─variant_type──┐
+│ [1,2,3] │ Array(UInt32) │
+└─────────┴───────────────┘
+```
+```sql
+SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) AS map_of_variant_types
+```
+```text
+┌─map_of_variants──────────────────┬─map_of_variant_types────────────────────────────┐
+│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │
+└──────────────────────────────────┴─────────────────────────────────────────────────┘
+```
+```sql
+SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS variants, arrayMap(x -> (x.1, variantType(x.2)), variants) AS variant_types
+```
+```text
+┌─variants───────────────────────────────┬─variant_types─────────────────────────────────────────┐
+│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │
+└────────────────────────────────────────┴───────────────────────────────────────────────────────┘
+```

View File

@@ -99,7 +99,7 @@ Alias: `OCTET_LENGTH`
 Returns the length of a string in Unicode code points (not: in bytes or characters). It assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
 Alias:
-- `CHAR_LENGTH``
+- `CHAR_LENGTH`
 - `CHARACTER_LENGTH`
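A quick, illustrative check that the aliases listed above return the code-point length documented for `lengthUTF8`:
```sql
-- 'château' is 7 Unicode code points, so all three columns return 7
SELECT
    lengthUTF8('château') AS length_utf8,
    CHAR_LENGTH('château') AS char_length,
    CHARACTER_LENGTH('château') AS character_length;
```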
 ## leftPad

View File

@@ -8,7 +8,7 @@ sidebar_label: VIEW
 You can modify `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created with the `ALTER TABLE … MODIFY QUERY` statement without interrupting ingestion process.
-This command is created to change materialized view created with `TO [db.]name` clause. It does not change the structure of the underling storage table and it does not change the columns' definition of the materialized view, because of this the application of this command is very limited for materialized views are created without `TO [db.]name` clause.
+This command is created to change materialized view created with `TO [db.]name` clause. It does not change the structure of the underlying storage table and it does not change the columns' definition of the materialized view, because of this the application of this command is very limited for materialized views are created without `TO [db.]name` clause.
 **Example with TO table**

View File

@@ -434,16 +434,18 @@ $ curl -v 'http://localhost:8123/predefined_query'
 ``` xml
 <http_handlers>
     <rule>
-        <url><![CDATA[regex:/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
        <methods>GET</methods>
         <headers>
             <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
         </headers>
         <handler>
             <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
         </handler>
     </rule>
     <defaults/>
@@ -451,13 +453,13 @@ $ curl -v 'http://localhost:8123/predefined_query'
 ```
 ``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
 ```
 :::note Предупреждение
-В одном `predefined_query_handler` поддерживается только один запрос типа `INSERT`.
+В одном `predefined_query_handler` поддерживается только один запрос.
 :::
 ### dynamic_query_handler {#dynamic_query_handler}

View File

@@ -2776,7 +2776,7 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
 - 0 — номера строк не выводятся.
 - 1 — номера строк выводятся.
-Значение по умолчанию: `0`.
+Значение по умолчанию: `1`.
 **Пример**
@@ -2798,7 +2798,7 @@ SELECT TOP 3 name, value FROM system.settings;
 ```
 ### output_format_pretty_color {#output_format_pretty_color}
 Включает/выключает управляющие последовательности ANSI в форматах Pretty.
 Возможные значения:
@@ -4123,7 +4123,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca
 ## session_timezone {#session_timezone}
 Задаёт значение часового пояса (session_timezone) по умолчанию для текущей сессии вместо [часового пояса сервера](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). То есть, все значения DateTime/DateTime64, для которых явно не задан часовой пояс, будут интерпретированы как относящиеся к указанной зоне.
 При значении настройки `''` (пустая строка), будет совпадать с часовым поясом сервера.
 Функции `timeZone()` and `serverTimezone()` возвращают часовой пояс текущей сессии и сервера соответственно.

View File

@@ -476,7 +476,7 @@ FROM
 - `r1` - количество уникальных посетителей за 2020-01-01 (`cond1`).
 - `r2` - количество уникальных посетителей в период между 2020-01-01 и 2020-01-02 (`cond1` и `cond2`).
-- `r3` - количество уникальных посетителей в период между 2020-01-01 и 2020-01-03 (`cond1` и `cond3`).
+- `r3` - количество уникальных посетителей в период за 2020-01-01 и 2020-01-03 (`cond1` и `cond3`).
 ## uniqUpTo(N)(x) {#uniquptonx}

View File

@@ -120,7 +120,7 @@ FROM dt
 - [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md)
 - [Функции для работы с массивами](../../sql-reference/functions/array-functions.md)
 - [Настройка `date_time_input_format`](../../operations/settings/index.md#settings-date_time_input_format)
-- [Настройка `date_time_output_format`](../../operations/settings/index.md)
+- [Настройка `date_time_output_format`](../../operations/settings/index.md#settings-date_time_output_format)
 - [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone)
 - [Параметр `session_timezone`](../../operations/settings/settings.md#session_timezone)
 - [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime)

View File

@@ -427,29 +427,32 @@ $ curl -v 'http://localhost:8123/predefined_query'
 ``` xml
 <http_handlers>
     <rule>
-        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
+        <url><![CDATA[regex:/query_param_with_url/(?P<name_1>[^/]+)]]></url>
-        <method>GET</method>
+        <methods>GET</methods>
         <headers>
             <XXX>TEST_HEADER_VALUE</XXX>
-            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
+            <PARAMS_XXX><![CDATA[regex:(?P<name_2>[^/]+)]]></PARAMS_XXX>
         </headers>
         <handler>
             <type>predefined_query_handler</type>
-            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
-            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
+            <query>
+                SELECT name, value FROM system.settings
+                WHERE name IN ({name_1:String}, {name_2:String})
+            </query>
         </handler>
     </rule>
+    <defaults/>
 </http_handlers>
 ```
 ``` bash
-$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_final_threads?max_threads=1&max_final_threads=2'
-1
-max_final_threads 2
+$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_final_threads' 'http://localhost:8123/query_param_with_url/max_threads?max_threads=1&max_final_threads=2'
+max_final_threads 2
+max_threads 1
 ```
 :::warning
-在一个`predefined_query_handler`中,只支持insert类型的一个`查询`。
+在一个`predefined_query_handler`中,只支持的一个`查询`。
 :::
 ### 动态查询 {#dynamic_query_handler}

View File

@@ -472,7 +472,7 @@ FROM
 - `r1`-2020-01-01期间访问该网站的独立访问者数量 `cond1` 条件)。
 - `r2`-在2020-01-01和2020-01-02之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond2` 条件)。
-- `r3`-在2020-01-01和2020-01-03之间的特定时间段内访问该网站的唯一访问者的数量 (`cond1` 和 `cond3` 条件)。
+- `r3`-在2020-01-01和2020-01-03 网站的独立访客数量 (`cond1` 和 `cond3` 条件)。
 ## uniqUpTo(N)(x) {#uniquptonx}

View File

@@ -30,10 +30,6 @@ conflicts:
 contents:
 - src: root/usr/lib/debug/usr/bin/clickhouse.debug
   dst: /usr/lib/debug/usr/bin/clickhouse.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
 # docs
 - src: ../AUTHORS
   dst: /usr/share/doc/clickhouse-common-static-dbg/AUTHORS

View File

@@ -36,10 +36,6 @@ contents:
   dst: /usr/bin/clickhouse
 - src: root/usr/bin/clickhouse-extract-from-config
   dst: /usr/bin/clickhouse-extract-from-config
-- src: root/usr/bin/clickhouse-library-bridge
-  dst: /usr/bin/clickhouse-library-bridge
-- src: root/usr/bin/clickhouse-odbc-bridge
-  dst: /usr/bin/clickhouse-odbc-bridge
 - src: root/usr/share/bash-completion/completions
   dst: /usr/share/bash-completion/completions
 - src: root/usr/share/clickhouse

View File

@@ -0,0 +1,35 @@
# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-library-bridge"
description: |
  ClickHouse Library Bridge - is a separate process for loading libraries for the 'library' dictionary sources and the CatBoost library.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.
# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse
# Package specific content
contents:
- src: root/usr/bin/clickhouse-library-bridge
  dst: /usr/bin/clickhouse-library-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-library-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-library-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-library-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-library-bridge/README.md

View File

@@ -0,0 +1,35 @@
# package sources should be placed in ${PWD}/root
# nfpm should run from the same directory with a config
name: "clickhouse-odbc-bridge"
description: |
  ClickHouse ODBC Bridge - is a separate process for loading ODBC drivers and interacting with external databases using the ODBC protocol.
  ClickHouse is a column-oriented database management system
  that allows generating analytical data reports in real time.
# Common packages config
arch: "${DEB_ARCH}" # amd64, arm64
platform: "linux"
version: "${CLICKHOUSE_VERSION_STRING}"
vendor: "ClickHouse Inc."
homepage: "https://clickhouse.com"
license: "Apache"
section: "database"
priority: "optional"
maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
deb:
  fields:
    Source: clickhouse
# Package specific content
contents:
- src: root/usr/bin/clickhouse-odbc-bridge
  dst: /usr/bin/clickhouse-odbc-bridge
# docs
- src: ../AUTHORS
  dst: /usr/share/doc/clickhouse-odbc-bridge/AUTHORS
- src: ../CHANGELOG.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/CHANGELOG.md
- src: ../LICENSE
  dst: /usr/share/doc/clickhouse-odbc-bridge/LICENSE
- src: ../README.md
  dst: /usr/share/doc/clickhouse-odbc-bridge/README.md

View File

@@ -17,12 +17,13 @@
 #include <Access/AccessControl.h>
-#include <Common/config_version.h>
-#include <Common/Exception.h>
-#include <Common/formatReadable.h>
-#include <Common/TerminalSize.h>
 #include <Common/Config/ConfigProcessor.h>
 #include <Common/Config/getClientConfigPath.h>
+#include <Common/CurrentThread.h>
+#include <Common/Exception.h>
+#include <Common/TerminalSize.h>
+#include <Common/config_version.h>
+#include <Common/formatReadable.h>
 #include <Columns/ColumnString.h>
 #include <Poco/Util/Application.h>

View File

@@ -237,7 +237,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
 ASTPtr res = parseQueryAndMovePosition(
     parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);
-std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
+std::unique_ptr<ReadBuffer> insert_query_payload;
 /// If the query is INSERT ... VALUES, then we will try to parse the data.
 if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
 {

View File

@@ -662,7 +662,6 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
 " <server>\n"
 " <certificateFile>" << (config_dir / "server.crt").string() << "</certificateFile>\n"
 " <privateKeyFile>" << (config_dir / "server.key").string() << "</privateKeyFile>\n"
-" <dhParamsFile>" << (config_dir / "dhparam.pem").string() << "</dhParamsFile>\n"
 " </server>\n"
 " </openSSL>\n"
 "</clickhouse>\n";

View File

@@ -24,9 +24,4 @@ target_link_libraries(clickhouse-library-bridge PRIVATE
 set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
-if (SPLIT_DEBUG_SYMBOLS)
-    clickhouse_split_debug_symbols(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-library-bridge)
-else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
-    install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-endif()
+install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

View File

@ -35,7 +35,7 @@ public:
ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib
private: private:
std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder = nullptr; std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder;
Container strings_holder; Container strings_holder;
}; };

View File

@ -30,12 +30,7 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..) set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro) target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro)
if (SPLIT_DEBUG_SYMBOLS) install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge)
else()
clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
if(ENABLE_TESTS) if(ENABLE_TESTS)
add_subdirectory(tests) add_subdirectory(tests)

View File

@ -12,6 +12,7 @@
#include <Poco/Net/HTTPServerRequest.h> #include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h> #include <Poco/Net/HTTPServerResponse.h>
#include <Poco/NumberParser.h> #include <Poco/NumberParser.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h> #include <Common/logger_useful.h>
#include <Common/BridgeProtocolVersion.h> #include <Common/BridgeProtocolVersion.h>
#include <Common/quoteString.h> #include <Common/quoteString.h>

View File

@ -5,7 +5,6 @@
#if USE_ODBC #if USE_ODBC
#include <Interpreters/Context_fwd.h> #include <Interpreters/Context_fwd.h>
#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandler.h> #include <Server/HTTP/HTTPRequestHandler.h>
#include <Poco/Logger.h> #include <Poco/Logger.h>

View File

@ -734,13 +734,17 @@ try
LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info); LOG_INFO(log, "Available CPU instruction sets: {}", cpu_info);
#endif #endif
bool will_have_trace_collector = hasPHDRCache() && config().has("trace_log");
// Initialize global thread pool. Do it before we fetch configs from zookeeper // Initialize global thread pool. Do it before we fetch configs from zookeeper
// nodes (`from_zk`), because ZooKeeper interface uses the pool. We will // nodes (`from_zk`), because ZooKeeper interface uses the pool. We will
// ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well. // ignore `max_thread_pool_size` in configs we fetch from ZK, but oh well.
GlobalThreadPool::initialize( GlobalThreadPool::initialize(
server_settings.max_thread_pool_size, server_settings.max_thread_pool_size,
server_settings.max_thread_pool_free_size, server_settings.max_thread_pool_free_size,
server_settings.thread_pool_queue_size); server_settings.thread_pool_queue_size,
will_have_trace_collector ? server_settings.global_profiler_real_time_period_ns : 0,
will_have_trace_collector ? server_settings.global_profiler_cpu_time_period_ns : 0);
/// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed). /// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
SCOPE_EXIT({ SCOPE_EXIT({
Stopwatch watch; Stopwatch watch;
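
Note: the hunk above passes the per-thread profiler timer periods to the global thread pool only when a trace collector will actually exist. A minimal compilable sketch of that gating follows; the struct and function names are illustrative assumptions, not ClickHouse API.

#include <cstdint>
#include <cstdio>

/// Sketch: pass real-time/CPU profiler periods only if a trace collector will
/// consume the resulting signals; 0 disables the timers entirely.
struct ProfilerPeriods { uint64_t real_time_ns; uint64_t cpu_time_ns; };

ProfilerPeriods effectivePeriods(bool will_have_trace_collector, uint64_t real_ns, uint64_t cpu_ns)
{
    if (!will_have_trace_collector)
        return {0, 0};
    return {real_ns, cpu_ns};
}

int main()
{
    auto p = effectivePeriods(/*will_have_trace_collector=*/ false, 10'000'000, 10'000'000);
    std::printf("real=%llu cpu=%llu\n",
                static_cast<unsigned long long>(p.real_time_ns),
                static_cast<unsigned long long>(p.cpu_time_ns));   /// both 0: timers disabled
}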

View File

@ -96,7 +96,7 @@
<to>https://{bucket}.s3.amazonaws.com</to> <to>https://{bucket}.s3.amazonaws.com</to>
</s3> </s3>
<gs> <gs>
<to>https://{bucket}.storage.googleapis.com</to> <to>https://storage.googleapis.com/{bucket}</to>
</gs> </gs>
<oss> <oss>
<to>https://{bucket}.oss.aliyuncs.com</to> <to>https://{bucket}.oss.aliyuncs.com</to>

View File

@ -16,6 +16,8 @@
#include <IO/ReadBufferFromString.h> #include <IO/ReadBufferFromString.h>
#include <Poco/UUIDGenerator.h> #include <Poco/UUIDGenerator.h>
#include <base/insertAtEnd.h> #include <base/insertAtEnd.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp> #include <boost/range/algorithm/copy.hpp>
namespace fs = std::filesystem; namespace fs = std::filesystem;

View File

@ -1,6 +1,8 @@
#include <Access/AccessRights.h> #include <Access/AccessRights.h>
#include <Common/logger_useful.h>
#include <base/sort.h> #include <base/sort.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <boost/container/small_vector.hpp> #include <boost/container/small_vector.hpp>
#include <boost/range/adaptor/map.hpp> #include <boost/range/adaptor/map.hpp>
#include <unordered_map> #include <unordered_map>

View File

@ -115,34 +115,34 @@ public:
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
{ {
this->data(place).add(*columns[0], row_num, arena); data(place).add(*columns[0], row_num, arena);
} }
void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
{ {
this->data(place).addManyDefaults(*columns[0], 0, arena); data(place).addManyDefaults(*columns[0], 0, arena);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
this->data(place).add(this->data(rhs), arena); data(place).add(data(rhs), arena);
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).write(buf, *serialization); data(place).write(buf, *serialization);
} }
void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{ {
this->data(place).read(buf, *serialization, arena); data(place).read(buf, *serialization, arena);
} }
bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); } bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{ {
this->data(place).insertResultInto(to); data(place).insertResultInto(to);
} }
}; };

View File

@ -1,11 +1,11 @@
#include <AggregateFunctions/AggregateFunctionFactory.h> #include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/Combinators/AggregateFunctionCombinatorFactory.h> #include <AggregateFunctions/Combinators/AggregateFunctionCombinatorFactory.h>
#include <DataTypes/DataTypeLowCardinality.h> #include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h> #include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h> #include <Functions/FunctionFactory.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000; static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;

View File

@ -559,7 +559,7 @@ public:
ptr = ptrs[row_num]; ptr = ptrs[row_num];
} }
this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena); data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
} }
void addManyDefaults( void addManyDefaults(
@ -572,7 +572,7 @@ public:
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
this->data(place).merge(this->data(rhs), arena); data(place).merge(data(rhs), arena);
} }
void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override
@ -590,7 +590,7 @@ public:
auto & array = assert_cast<ColumnArray &>(to); auto & array = assert_cast<ColumnArray &>(to);
auto & str = assert_cast<ColumnString &>(array.getData()); auto & str = assert_cast<ColumnString &>(array.getData());
this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0); data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
array.getOffsets().push_back(str.size()); array.getOffsets().push_back(str.size());
} }

View File

@ -89,10 +89,10 @@ struct GroupArraySamplerData
chassert(lim != 0); chassert(lim != 0);
/// With a large number of values, we will generate random numbers several times slower. /// With a large number of values, we will generate random numbers several times slower.
if (lim <= static_cast<UInt64>(rng.max())) if (lim <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % lim; return rng() % lim;
else else
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % lim; return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
} }
void randomShuffle() void randomShuffle()
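
Note: this hunk (and a later ReservoirSampler hunk with the same change) calls the static pcg32_fast::max() instead of rng.max() in the helper that draws a random index below a limit. A compilable sketch of the same technique, using std::mt19937 as a stand-in 32-bit generator:

#include <cstdint>
#include <cstdio>
#include <random>

/// Sketch: draw an index in [0, lim). max() is a static member, so the call
/// does not need the generator instance. When lim exceeds the 32-bit range,
/// widen to 64 bits from two draws: hi * 2^32 + lo.
uint64_t randomBelow(std::mt19937 & rng, uint64_t lim)
{
    if (lim <= static_cast<uint64_t>(std::mt19937::max()))
        return rng() % lim;
    return (static_cast<uint64_t>(rng()) * (static_cast<uint64_t>(std::mt19937::max()) + 1ULL)
            + static_cast<uint64_t>(rng())) % lim;
}

int main()
{
    std::mt19937 rng(42);
    std::printf("%llu\n", static_cast<unsigned long long>(randomBelow(rng, 10)));
}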

View File

@ -242,7 +242,7 @@ public:
{ {
Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type); Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type);
Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type); Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type);
this->data(place).add(x, y, arena); data(place).add(x, y, arena);
} }
Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const
@ -264,25 +264,25 @@ public:
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
auto & a = this->data(place); auto & a = data(place);
const auto & b = this->data(rhs); const auto & b = data(rhs);
a.merge(b, arena); a.merge(b, arena);
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).write(buf); data(place).write(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{ {
this->data(place).read(buf, arena); data(place).read(buf, arena);
} }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
{ {
auto res = this->data(place).getResult(total_buckets, arena); auto res = data(place).getResult(total_buckets, arena);
auto & col = assert_cast<ColumnArray &>(to); auto & col = assert_cast<ColumnArray &>(to);
auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn()); auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());

View File

@ -205,35 +205,35 @@ public:
UInt8 is_second = columns[1]->getUInt(row_num); UInt8 is_second = columns[1]->getUInt(row_num);
if (is_second) if (is_second)
this->data(place).addY(value, arena); data(place).addY(value, arena);
else else
this->data(place).addX(value, arena); data(place).addX(value, arena);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
auto & a = this->data(place); auto & a = data(place);
const auto & b = this->data(rhs); const auto & b = data(rhs);
a.merge(b, arena); a.merge(b, arena);
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).write(buf); data(place).write(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{ {
this->data(place).read(buf, arena); data(place).read(buf, arena);
} }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{ {
if (!this->data(place).size_x || !this->data(place).size_y) if (!data(place).size_x || !data(place).size_y)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName()); throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());
auto [u_statistic, p_value] = this->data(place).getResult(alternative, continuity_correction); auto [u_statistic, p_value] = data(place).getResult(alternative, continuity_correction);
/// Because p-value is a probability. /// Because p-value is a probability.
p_value = std::min(1.0, std::max(0.0, p_value)); p_value = std::min(1.0, std::max(0.0, p_value));

View File

@ -66,31 +66,31 @@ public:
{ {
Float64 new_x = columns[0]->getFloat64(row_num); Float64 new_x = columns[0]->getFloat64(row_num);
Float64 new_y = columns[1]->getFloat64(row_num); Float64 new_y = columns[1]->getFloat64(row_num);
this->data(place).addX(new_x, arena); data(place).addX(new_x, arena);
this->data(place).addY(new_y, arena); data(place).addY(new_y, arena);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
auto & a = this->data(place); auto & a = data(place);
const auto & b = this->data(rhs); const auto & b = data(rhs);
a.merge(b, arena); a.merge(b, arena);
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).write(buf); data(place).write(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{ {
this->data(place).read(buf, arena); data(place).read(buf, arena);
} }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{ {
auto answer = this->data(place).getResult(); auto answer = data(place).getResult();
auto & column = static_cast<ColumnVector<Float64> &>(to); auto & column = static_cast<ColumnVector<Float64> &>(to);
column.getData().push_back(answer); column.getData().push_back(answer);

View File

@ -102,24 +102,24 @@ public:
auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num]; auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
if (event) if (event)
{ {
this->data(place).add(i); data(place).add(i);
} }
} }
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{ {
this->data(place).merge(this->data(rhs)); data(place).merge(data(rhs));
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).serialize(buf); data(place).serialize(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{ {
this->data(place).deserialize(buf); data(place).deserialize(buf);
} }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@ -130,13 +130,13 @@ public:
ColumnArray::Offset current_offset = data_to.size(); ColumnArray::Offset current_offset = data_to.size();
data_to.resize(current_offset + events_size); data_to.resize(current_offset + events_size);
const bool first_flag = this->data(place).events.test(0); const bool first_flag = data(place).events.test(0);
data_to[current_offset] = first_flag; data_to[current_offset] = first_flag;
++current_offset; ++current_offset;
for (size_t i = 1; i < events_size; ++i) for (size_t i = 1; i < events_size; ++i)
{ {
data_to[current_offset] = (first_flag && this->data(place).events.test(i)); data_to[current_offset] = (first_flag && data(place).events.test(i));
++current_offset; ++current_offset;
} }

View File

@ -123,22 +123,22 @@ public:
Float64 x = columns[0]->getFloat64(row_num); Float64 x = columns[0]->getFloat64(row_num);
Float64 y = columns[1]->getFloat64(row_num); Float64 y = columns[1]->getFloat64(row_num);
this->data(place).add(x, y); data(place).add(x, y);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{ {
this->data(place).merge(this->data(rhs)); data(place).merge(data(rhs));
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).serialize(buf); data(place).serialize(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{ {
this->data(place).deserialize(buf); data(place).deserialize(buf);
} }
static DataTypePtr createResultType() static DataTypePtr createResultType()
@ -168,8 +168,8 @@ public:
IColumn & to, IColumn & to,
Arena *) const override Arena *) const override
{ {
Float64 k = this->data(place).getK(); Float64 k = data(place).getK();
Float64 b = this->data(place).getB(k); Float64 b = data(place).getB(k);
auto & col_tuple = assert_cast<ColumnTuple &>(to); auto & col_tuple = assert_cast<ColumnTuple &>(to);
auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0)); auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0));

View File

@ -120,7 +120,7 @@ public:
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
{ {
this->data(place).add(*columns[0], row_num, arena); data(place).add(*columns[0], row_num, arena);
} }
void addBatchSinglePlace( void addBatchSinglePlace(
@ -131,7 +131,7 @@ public:
Arena * arena, Arena * arena,
ssize_t if_argument_pos) const override ssize_t if_argument_pos) const override
{ {
if (this->data(place).isNull()) if (data(place).isNull())
return; return;
IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace( IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace(
row_begin, row_end, place, columns, arena, if_argument_pos); row_begin, row_end, place, columns, arena, if_argument_pos);
@ -146,7 +146,7 @@ public:
Arena * arena, Arena * arena,
ssize_t if_argument_pos) const override ssize_t if_argument_pos) const override
{ {
if (this->data(place).isNull()) if (data(place).isNull())
return; return;
IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull( IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull(
row_begin, row_end, place, columns, null_map, arena, if_argument_pos); row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
@ -154,29 +154,29 @@ public:
void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
{ {
this->data(place).add(*columns[0], 0, arena); data(place).add(*columns[0], 0, arena);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{ {
this->data(place).add(this->data(rhs), arena); data(place).add(data(rhs), arena);
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).write(buf, *serialization); data(place).write(buf, *serialization);
} }
void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{ {
this->data(place).read(buf, *serialization, arena); data(place).read(buf, *serialization, arena);
} }
bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); } bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{ {
this->data(place).insertResultInto(to); data(place).insertResultInto(to);
} }
}; };

View File

@ -150,13 +150,13 @@ private:
Float64 getResult(ConstAggregateDataPtr __restrict place) const Float64 getResult(ConstAggregateDataPtr __restrict place) const
{ {
const auto & data = this->data(place); const auto & dt = data(place);
switch (kind) switch (kind)
{ {
case VarKind::varSampStable: return getVarSamp(data.m2, data.count); case VarKind::varSampStable: return getVarSamp(dt.m2, dt.count);
case VarKind::stddevSampStable: return getStddevSamp(data.m2, data.count); case VarKind::stddevSampStable: return getStddevSamp(dt.m2, dt.count);
case VarKind::varPopStable: return getVarPop(data.m2, data.count); case VarKind::varPopStable: return getVarPop(dt.m2, dt.count);
case VarKind::stddevPopStable: return getStddevPop(data.m2, data.count); case VarKind::stddevPopStable: return getStddevPop(dt.m2, dt.count);
} }
} }
@ -182,22 +182,22 @@ public:
void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{ {
this->data(place).update(*columns[0], row_num); data(place).update(*columns[0], row_num);
} }
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
{ {
this->data(place).mergeWith(this->data(rhs)); data(place).mergeWith(data(rhs));
} }
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{ {
this->data(place).serialize(buf); data(place).serialize(buf);
} }
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{ {
this->data(place).deserialize(buf); data(place).deserialize(buf);
} }
void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override

View File

@ -491,7 +491,7 @@ public:
std::vector<const UInt8 *> nullable_filters; std::vector<const UInt8 *> nullable_filters;
const IColumn * nested_columns[number_of_arguments]; const IColumn * nested_columns[number_of_arguments];
std::unique_ptr<UInt8[]> final_flags = nullptr; std::unique_ptr<UInt8[]> final_flags;
const UInt8 * final_flags_ptr = nullptr; const UInt8 * final_flags_ptr = nullptr;
if (if_argument_pos >= 0) if (if_argument_pos >= 0)

View File

@ -1,17 +1,18 @@
#pragma once #pragma once
#include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/ColumnSparse.h> #include <Columns/ColumnSparse.h>
#include <Columns/ColumnTuple.h> #include <Columns/ColumnTuple.h>
#include <Columns/ColumnsNumber.h> #include <Columns/ColumnsNumber.h>
#include <Core/Block.h> #include <Core/Block.h>
#include <Core/ColumnNumbers.h> #include <Core/ColumnNumbers.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <Core/IResolvedFunction.h>
#include <Core/ValuesWithType.h> #include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h> #include <Interpreters/Context_fwd.h>
#include <base/types.h> #include <base/types.h>
#include <Common/Exception.h> #include <Common/Exception.h>
#include <Common/ThreadPool_fwd.h> #include <Common/ThreadPool_fwd.h>
#include <Core/IResolvedFunction.h>
#include "config.h" #include "config.h"
@ -46,13 +47,6 @@ class IWindowFunction;
using DataTypePtr = std::shared_ptr<const IDataType>; using DataTypePtr = std::shared_ptr<const IDataType>;
using DataTypes = std::vector<DataTypePtr>; using DataTypes = std::vector<DataTypePtr>;
using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;
class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
struct AggregateFunctionProperties; struct AggregateFunctionProperties;
/** Aggregate functions interface. /** Aggregate functions interface.

View File

@ -0,0 +1,14 @@
#pragma once
#include <memory>
#include <vector>
namespace DB
{
using AggregateDataPtr = char *;
using AggregateDataPtrs = std::vector<AggregateDataPtr>;
using ConstAggregateDataPtr = const char *;
class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
}
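
Note: the new IAggregateFunction_fwd.h above carries only pointer aliases and a forward declaration, so headers that merely store an AggregateFunctionPtr (ColumnAggregateFunction.h is switched to it later in this diff) no longer include the full interface header. A generic sketch of the pattern with made-up names:

#include <memory>

/// Forward-declaration header pattern: expose the smart-pointer alias without
/// the class definition. MyFunction stands in for IAggregateFunction.
class MyFunction;
using MyFunctionPtr = std::shared_ptr<const MyFunction>;

/// A consumer can declare members of the alias type without ever seeing the
/// full definition, which keeps its own header cheap to include.
struct Holder
{
    MyFunctionPtr func;
};

int main()
{
    Holder h;                  /// default-constructs a null shared_ptr
    return h.func ? 1 : 0;     /// 0: nothing attached
}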

View File

@ -258,10 +258,10 @@ private:
chassert(limit > 0); chassert(limit > 0);
/// With a large number of values, we will generate random numbers several times slower. /// With a large number of values, we will generate random numbers several times slower.
if (limit <= static_cast<UInt64>(rng.max())) if (limit <= static_cast<UInt64>(pcg32_fast::max()))
return rng() % limit; return rng() % limit;
else else
return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % limit; return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
} }
void sortIfNeeded() void sortIfNeeded()

View File

@ -579,7 +579,7 @@ std::optional<size_t> SingleValueDataFixed<T>::getGreatestIndexNotNullIf(
return std::nullopt; return std::nullopt;
for (size_t i = index + 1; i < row_end; i++) for (size_t i = index + 1; i < row_end; i++)
if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] < vec[index])) if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] > vec[index]))
index = i; index = i;
return {index}; return {index};
} }
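
Note: the one-character fix above makes getGreatestIndexNotNullIf track the greatest element instead of the smallest. A simplified, self-contained sketch of the corrected scan (plain vector<double> instead of the templated column data):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <vector>

/// Among rows passing the optional IF filter and not marked NULL, keep the
/// index of the greatest value. The bug was comparing with '<', which kept the
/// smallest value instead.
std::optional<size_t> greatestIndexNotNullIf(
    const std::vector<double> & vec,
    const uint8_t * null_map,   /// nullptr, or 1 = NULL
    const uint8_t * if_map,     /// nullptr, or 0 = filtered out
    size_t row_begin,
    size_t row_end)
{
    std::optional<size_t> index;
    for (size_t i = row_begin; i < row_end; ++i)
    {
        if ((if_map && if_map[i] == 0) || (null_map && null_map[i] != 0))
            continue;
        if (!index || vec[i] > vec[*index])   /// '>' keeps the greatest
            index = i;
    }
    return index;
}

int main()
{
    std::vector<double> v{3.0, 7.0, 5.0};
    auto idx = greatestIndexNotNullIf(v, nullptr, nullptr, 0, v.size());
    std::printf("%zu\n", idx ? *idx : 0);   /// prints 1
}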

View File

@ -1,14 +1,12 @@
#include <Analyzer/ArrayJoinNode.h> #include <Analyzer/ArrayJoinNode.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h> #include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTExpressionList.h> #include <Parsers/ASTExpressionList.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Analyzer/Utils.h> #include <Common/assert_cast.h>
#include <Analyzer/ColumnNode.h>
namespace DB namespace DB
{ {

View File

@ -1,14 +1,12 @@
#include <Analyzer/ColumnNode.h> #include <Analyzer/ColumnNode.h>
#include <Analyzer/TableNode.h>
#include <Common/SipHash.h> #include <IO/Operators.h>
#include <IO/WriteBuffer.h> #include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <Parsers/ASTIdentifier.h> #include <Parsers/ASTIdentifier.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>
#include <Analyzer/TableNode.h>
namespace DB namespace DB
{ {

View File

@ -1,16 +1,14 @@
#include <Analyzer/JoinNode.h> #include <Analyzer/JoinNode.h>
#include <Analyzer/ListNode.h> #include <Analyzer/ListNode.h>
#include <Analyzer/Utils.h>
#include <IO/Operators.h>
#include <IO/WriteBuffer.h> #include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTFunction.h> #include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSubquery.h>
#include <Parsers/ASTTablesInSelectQuery.h> #include <Parsers/ASTTablesInSelectQuery.h>
#include <Common/assert_cast.h>
#include <Analyzer/Utils.h>
namespace DB namespace DB
{ {

View File

@ -1940,8 +1940,7 @@ std::vector<String> QueryAnalyzer::collectIdentifierTypoHints(const Identifier &
for (const auto & valid_identifier : valid_identifiers) for (const auto & valid_identifier : valid_identifiers)
prompting_strings.push_back(valid_identifier.getFullName()); prompting_strings.push_back(valid_identifier.getFullName());
NamePrompter<1> prompter; return NamePrompter<1>::getHints(unresolved_identifier.getFullName(), prompting_strings);
return prompter.getHints(unresolved_identifier.getFullName(), prompting_strings);
} }
/** Wrap expression node in tuple element function calls for nested paths. /** Wrap expression node in tuple element function calls for nested paths.
@ -3993,9 +3992,15 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
} }
else if (resolved_identifier->as<ConstantNode>()) else if (resolved_identifier->as<ConstantNode>())
{ {
lookup_result.resolved_identifier = resolved_identifier;
return lookup_result; return lookup_result;
} }
else if (auto * resolved_function = resolved_identifier->as<FunctionNode>())
{
/// Special case: scalar subquery was executed and replaced by __getScalar function.
/// Handle it as a constant.
if (resolved_function->getFunctionName() == "__getScalar")
return lookup_result;
}
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"Resolve identifier '{}' from parent scope only supported for constants and CTE. Actual {} node type {}. In scope {}", "Resolve identifier '{}' from parent scope only supported for constants and CTE. Actual {} node type {}. In scope {}",
@ -6083,7 +6088,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
* Example: SELECT toTypeName(sum(number)) FROM numbers(10); * Example: SELECT toTypeName(sum(number)) FROM numbers(10);
*/ */
if (column && isColumnConst(*column) && !typeid_cast<const ColumnConst *>(column.get())->getDataColumn().isDummy() && if (column && isColumnConst(*column) && !typeid_cast<const ColumnConst *>(column.get())->getDataColumn().isDummy() &&
(!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin"))) !hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin") &&
/// Sanity check: do not convert large columns to constants
column->byteSize() < 1_MiB)
{ {
/// Replace function node with result constant node /// Replace function node with result constant node
Field column_constant_value; Field column_constant_value;
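
Note: the added sanity check above only folds a function result into a constant when the column's byteSize() stays under 1_MiB. ClickHouse defines its own size literal; the stand-in below only illustrates what the value works out to.

#include <cstddef>
#include <cstdio>

/// Illustrative user-defined literal; not the project's actual definition.
constexpr std::size_t operator""_MiB(unsigned long long v)
{
    return static_cast<std::size_t>(v) * 1024 * 1024;
}

int main()
{
    constexpr std::size_t limit = 1_MiB;
    std::printf("%zu\n", limit);   /// 1048576 bytes
}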

View File

@ -29,7 +29,8 @@ NamesAndTypes extractProjectionColumnsForGroupBy(const QueryNode * query_node)
return {}; return {};
NamesAndTypes result; NamesAndTypes result;
for (const auto & group_by_ele : query_node->getGroupByNode()->getChildren()) const auto & group_by_elements = query_node->getGroupByNode()->getChildren();
for (const auto & group_by_element : group_by_elements)
{ {
const auto & projection_columns = query_node->getProjectionColumns(); const auto & projection_columns = query_node->getProjectionColumns();
const auto & projection_nodes = query_node->getProjection().getNodes(); const auto & projection_nodes = query_node->getProjection().getNodes();
@ -38,10 +39,18 @@ NamesAndTypes extractProjectionColumnsForGroupBy(const QueryNode * query_node)
for (size_t i = 0; i < projection_columns.size(); i++) for (size_t i = 0; i < projection_columns.size(); i++)
{ {
if (projection_nodes[i]->isEqual(*group_by_ele)) if (projection_nodes[i]->isEqual(*group_by_element))
{
result.push_back(projection_columns[i]); result.push_back(projection_columns[i]);
break;
}
} }
} }
/// If some group by keys are not matched, we cannot apply optimization,
/// because prefix of group by keys may not be unique.
if (result.size() != group_by_elements.size())
return {};
return result; return result;
} }
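
Note: the hunk above maps every GROUP BY key to a projection column and now bails out entirely if any key is unmatched, since a partial set of keys may not identify groups uniquely. A simplified sketch of that check, with expressions reduced to strings:

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

/// Collect the projection column for every GROUP BY key; return an empty
/// result if any key has no matching projection, so the optimization is skipped.
std::vector<std::string> matchGroupByToProjection(
    const std::vector<std::string> & group_by_keys,
    const std::vector<std::string> & projection_exprs,
    const std::vector<std::string> & projection_names)
{
    std::vector<std::string> result;
    for (const auto & key : group_by_keys)
    {
        for (std::size_t i = 0; i < projection_exprs.size(); ++i)
        {
            if (projection_exprs[i] == key)
            {
                result.push_back(projection_names[i]);
                break;   /// one match per key is enough
            }
        }
    }
    if (result.size() != group_by_keys.size())
        return {};   /// some key is not projected
    return result;
}

int main()
{
    auto cols = matchGroupByToProjection({"a", "b"}, {"a"}, {"a_out"});
    std::printf("%zu\n", cols.size());   /// 0: key "b" has no projection
}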

View File

@ -444,8 +444,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
nulls_sort_direction = order_by_element.nulls_direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING; nulls_sort_direction = order_by_element.nulls_direction == 1 ? SortDirection::ASCENDING : SortDirection::DESCENDING;
std::shared_ptr<Collator> collator; std::shared_ptr<Collator> collator;
if (order_by_element.collation) if (order_by_element.getCollation())
collator = std::make_shared<Collator>(order_by_element.collation->as<ASTLiteral &>().value.get<String &>()); collator = std::make_shared<Collator>(order_by_element.getCollation()->as<ASTLiteral &>().value.get<String &>());
const auto & sort_expression_ast = order_by_element.children.at(0); const auto & sort_expression_ast = order_by_element.children.at(0);
auto sort_expression = buildExpression(sort_expression_ast, context); auto sort_expression = buildExpression(sort_expression_ast, context);
@ -455,12 +455,12 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express
std::move(collator), std::move(collator),
order_by_element.with_fill); order_by_element.with_fill);
if (order_by_element.fill_from) if (order_by_element.getFillFrom())
sort_node->getFillFrom() = buildExpression(order_by_element.fill_from, context); sort_node->getFillFrom() = buildExpression(order_by_element.getFillFrom(), context);
if (order_by_element.fill_to) if (order_by_element.getFillTo())
sort_node->getFillTo() = buildExpression(order_by_element.fill_to, context); sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context);
if (order_by_element.fill_step) if (order_by_element.getFillStep())
sort_node->getFillStep() = buildExpression(order_by_element.fill_step, context); sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context);
list_node->getNodes().push_back(std::move(sort_node)); list_node->getNodes().push_back(std::move(sort_node));
} }
@ -558,7 +558,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildExpression(const ASTPtr & expression, co
} }
else if (const auto * function = expression->as<ASTFunction>()) else if (const auto * function = expression->as<ASTFunction>())
{ {
if (function->is_lambda_function) if (function->is_lambda_function || isASTLambdaFunction(*function))
{ {
const auto & lambda_arguments_and_expression = function->arguments->as<ASTExpressionList &>().children; const auto & lambda_arguments_and_expression = function->arguments->as<ASTExpressionList &>().children;
auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as<ASTFunction &>(); auto & lambda_arguments_tuple = lambda_arguments_and_expression.at(0)->as<ASTFunction &>();

View File

@ -1,14 +1,15 @@
#pragma once #pragma once
#include <Core/Settings.h> #include <Core/Block.h>
#include <DataTypes/IDataType.h> #include <memory>
#include <QueryPipeline/SizeLimits.h>
namespace DB namespace DB
{ {
class IDataType;
using DataTypePtr = std::shared_ptr<const IDataType>;
class Set; class Set;
using SetPtr = std::shared_ptr<Set>; using SetPtr = std::shared_ptr<Set>;

View File

@ -120,17 +120,18 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const
result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value(); result->nulls_direction_was_explicitly_specified = nulls_sort_direction.has_value();
result->with_fill = with_fill;
result->fill_from = hasFillFrom() ? getFillFrom()->toAST(options) : nullptr;
result->fill_to = hasFillTo() ? getFillTo()->toAST(options) : nullptr;
result->fill_step = hasFillStep() ? getFillStep()->toAST(options) : nullptr;
result->children.push_back(getExpression()->toAST(options)); result->children.push_back(getExpression()->toAST(options));
if (collator) if (collator)
{ result->setCollation(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
result->children.push_back(std::make_shared<ASTLiteral>(Field(collator->getLocale())));
result->collation = result->children.back(); result->with_fill = with_fill;
} if (hasFillFrom())
result->setFillFrom(getFillFrom()->toAST(options));
if (hasFillTo())
result->setFillTo(getFillTo()->toAST(options));
if (hasFillStep())
result->setFillStep(getFillStep()->toAST(options));
return result; return result;
} }

View File

@ -1,11 +1,9 @@
#include <Analyzer/WindowNode.h> #include <Analyzer/WindowNode.h>
#include <Common/SipHash.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h> #include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Parsers/ASTWindowDefinition.h> #include <Parsers/ASTWindowDefinition.h>
#include <Common/SipHash.h>
#include <Common/assert_cast.h>
namespace DB namespace DB
{ {

View File

@ -1,22 +1,25 @@
#include <Access/Common/AccessEntityType.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupEntriesCollector.h> #include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromMemory.h> #include <Backups/BackupEntryFromMemory.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationStage.h>
#include <Backups/BackupUtils.h> #include <Backups/BackupUtils.h>
#include <Backups/DDLAdjustingForBackupVisitor.h> #include <Backups/DDLAdjustingForBackupVisitor.h>
#include <Backups/IBackupCoordination.h>
#include <Databases/IDatabase.h> #include <Databases/IDatabase.h>
#include <Interpreters/Context.h> #include <Interpreters/Context.h>
#include <Interpreters/DatabaseCatalog.h> #include <Interpreters/DatabaseCatalog.h>
#include <Parsers/ASTCreateQuery.h> #include <Parsers/ASTCreateQuery.h>
#include <Parsers/formatAST.h> #include <Parsers/formatAST.h>
#include <Storages/IStorage.h> #include <Storages/IStorage.h>
#include <Access/Common/AccessEntityType.h>
#include <base/chrono_io.h> #include <base/chrono_io.h>
#include <base/insertAtEnd.h> #include <base/insertAtEnd.h>
#include <base/scope_guard.h>
#include <base/sleep.h> #include <base/sleep.h>
#include <Common/escapeForFileName.h> #include <Common/escapeForFileName.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp> #include <boost/range/algorithm/copy.hpp>
#include <base/scope_guard.h>
#include <filesystem> #include <filesystem>
namespace fs = std::filesystem; namespace fs = std::filesystem;

View File

@ -124,11 +124,12 @@ BackupReaderS3::BackupReaderS3(
bool allow_s3_native_copy, bool allow_s3_native_copy,
const ReadSettings & read_settings_, const ReadSettings & read_settings_,
const WriteSettings & write_settings_, const WriteSettings & write_settings_,
const ContextPtr & context_) const ContextPtr & context_,
bool is_internal_backup)
: BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderS3")) : BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderS3"))
, s3_uri(s3_uri_) , s3_uri(s3_uri_)
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false} , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName())) , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{ {
auto & request_settings = s3_settings.request_settings; auto & request_settings = s3_settings.request_settings;
request_settings.updateFromSettings(context_->getSettingsRef()); request_settings.updateFromSettings(context_->getSettingsRef());
@ -214,11 +215,12 @@ BackupWriterS3::BackupWriterS3(
const String & storage_class_name, const String & storage_class_name,
const ReadSettings & read_settings_, const ReadSettings & read_settings_,
const WriteSettings & write_settings_, const WriteSettings & write_settings_,
const ContextPtr & context_) const ContextPtr & context_,
bool is_internal_backup)
: BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterS3")) : BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterS3"))
, s3_uri(s3_uri_) , s3_uri(s3_uri_)
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false} , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::S3, MetadataStorageType::None, s3_uri.endpoint, false, false}
, s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName())) , s3_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString(), context_->getUserName(), /*ignore_user=*/is_internal_backup))
{ {
auto & request_settings = s3_settings.request_settings; auto & request_settings = s3_settings.request_settings;
request_settings.updateFromSettings(context_->getSettingsRef()); request_settings.updateFromSettings(context_->getSettingsRef());

View File

@ -18,7 +18,15 @@ namespace DB
class BackupReaderS3 : public BackupReaderDefault class BackupReaderS3 : public BackupReaderDefault
{ {
public: public:
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_); BackupReaderS3(
const S3::URI & s3_uri_,
const String & access_key_id_,
const String & secret_access_key_,
bool allow_s3_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool is_internal_backup);
~BackupReaderS3() override; ~BackupReaderS3() override;
bool fileExists(const String & file_name) override; bool fileExists(const String & file_name) override;
@ -41,7 +49,16 @@ private:
class BackupWriterS3 : public BackupWriterDefault class BackupWriterS3 : public BackupWriterDefault
{ {
public: public:
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, bool allow_s3_native_copy, const String & storage_class_name, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_); BackupWriterS3(
const S3::URI & s3_uri_,
const String & access_key_id_,
const String & secret_access_key_,
bool allow_s3_native_copy,
const String & storage_class_name,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool is_internal_backup);
~BackupWriterS3() override; ~BackupWriterS3() override;
bool fileExists(const String & file_name) override; bool fileExists(const String & file_name) override;

View File

@ -27,6 +27,8 @@
#include <Common/scope_guard_safe.h> #include <Common/scope_guard_safe.h>
#include <Common/ThreadPool.h> #include <Common/ThreadPool.h>
#include <boost/range/adaptor/map.hpp>
namespace CurrentMetrics namespace CurrentMetrics
{ {
@ -940,6 +942,7 @@ void BackupsWorker::doRestore(
backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup; backup_open_params.use_same_s3_credentials_for_base_backup = restore_settings.use_same_s3_credentials_for_base_backup;
backup_open_params.read_settings = getReadSettingsForRestore(context); backup_open_params.read_settings = getReadSettingsForRestore(context);
backup_open_params.write_settings = getWriteSettingsForRestore(context); backup_open_params.write_settings = getWriteSettingsForRestore(context);
backup_open_params.is_internal_backup = restore_settings.internal;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params); BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
String current_database = context->getCurrentDatabase(); String current_database = context->getCurrentDatabase();

View File

@ -24,6 +24,9 @@
#include <Common/escapeForFileName.h> #include <Common/escapeForFileName.h>
#include <base/insertAtEnd.h> #include <base/insertAtEnd.h>
#include <boost/algorithm/string/join.hpp> #include <boost/algorithm/string/join.hpp>
#include <boost/range/adaptor/map.hpp>
#include <filesystem> #include <filesystem>
#include <ranges> #include <ranges>

View File

@ -110,7 +110,8 @@ void registerBackupEngineS3(BackupFactory & factory)
params.allow_s3_native_copy, params.allow_s3_native_copy,
params.read_settings, params.read_settings,
params.write_settings, params.write_settings,
params.context); params.context,
params.is_internal_backup);
return std::make_unique<BackupImpl>( return std::make_unique<BackupImpl>(
params.backup_info, params.backup_info,
@ -129,7 +130,8 @@ void registerBackupEngineS3(BackupFactory & factory)
params.s3_storage_class, params.s3_storage_class,
params.read_settings, params.read_settings,
params.write_settings, params.write_settings,
params.context); params.context,
params.is_internal_backup);
return std::make_unique<BackupImpl>( return std::make_unique<BackupImpl>(
params.backup_info, params.backup_info,

View File

@ -207,11 +207,17 @@ std::vector<ConnectionPoolWithFailover::TryResult> ConnectionPoolWithFailover::g
max_entries = nested_pools.size(); max_entries = nested_pools.size();
} }
else if (pool_mode == PoolMode::GET_ONE) else if (pool_mode == PoolMode::GET_ONE)
{
max_entries = 1; max_entries = 1;
}
else if (pool_mode == PoolMode::GET_MANY) else if (pool_mode == PoolMode::GET_MANY)
{
max_entries = settings.max_parallel_replicas; max_entries = settings.max_parallel_replicas;
}
else else
{
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode"); throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown pool allocation mode");
}
if (!priority_func) if (!priority_func)
priority_func = makeGetPriorityFunc(settings); priority_func = makeGetPriorityFunc(settings);

View File

@ -82,7 +82,7 @@ std::vector<Connection *> HedgedConnectionsFactory::getManyConnections(PoolMode
} }
case PoolMode::GET_MANY: case PoolMode::GET_MANY:
{ {
max_entries = max_parallel_replicas; max_entries = std::min(max_parallel_replicas, shuffled_pools.size());
break; break;
} }
} }
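
Note: together with the ConnectionPoolWithFailover hunk just above, this change caps GET_MANY by the number of available pools, so the factory never asks for more replicas than exist. A compilable sketch of the resulting entry-count rule; the enum and function names are illustrative.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <stdexcept>

enum class PoolMode { GET_ALL, GET_ONE, GET_MANY };

/// Entry-count selection: GET_MANY is clamped by the number of shuffled pools.
std::size_t maxEntriesFor(PoolMode mode, std::size_t pools, std::size_t max_parallel_replicas)
{
    switch (mode)
    {
        case PoolMode::GET_ALL:  return pools;
        case PoolMode::GET_ONE:  return 1;
        case PoolMode::GET_MANY: return std::min(max_parallel_replicas, pools);
    }
    throw std::logic_error("unknown pool allocation mode");
}

int main()
{
    std::printf("%zu\n", maxEntriesFor(PoolMode::GET_MANY, /*pools=*/ 3, /*max_parallel_replicas=*/ 16));   /// 3
}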

View File

@ -158,7 +158,7 @@ private:
/// checking the number of requested replicas that are still in process). /// checking the number of requested replicas that are still in process).
size_t requested_connections_count = 0; size_t requested_connections_count = 0;
const size_t max_parallel_replicas = 0; const size_t max_parallel_replicas = 1;
const bool skip_unavailable_shards = false; const bool skip_unavailable_shards = false;
}; };

View File

@ -1,7 +1,13 @@
#include <Columns/ColumnAggregateFunction.h> #include <Columns/ColumnAggregateFunction.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnsCommon.h> #include <Columns/ColumnsCommon.h>
#include <Columns/MaskOperations.h> #include <Columns/MaskOperations.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
#include <Common/AlignedBuffer.h> #include <Common/AlignedBuffer.h>
#include <Common/Arena.h> #include <Common/Arena.h>
#include <Common/FieldVisitorToString.h> #include <Common/FieldVisitorToString.h>
@ -11,10 +17,6 @@
#include <Common/assert_cast.h> #include <Common/assert_cast.h>
#include <Common/iota.h> #include <Common/iota.h>
#include <Common/typeid_cast.h> #include <Common/typeid_cast.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromArena.h>
#include <IO/WriteBufferFromString.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
namespace DB namespace DB
@ -109,6 +111,11 @@ ConstArenas concatArenas(const ConstArenas & array, ConstArenaPtr arena)
} }
std::string ColumnAggregateFunction::getName() const
{
return "AggregateFunction(" + func->getName() + ")";
}
MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr column) MutableColumnPtr ColumnAggregateFunction::convertToValues(MutableColumnPtr column)
{ {
/** If the aggregate function returns an unfinalized/unfinished state, /** If the aggregate function returns an unfinalized/unfinished state,

View File

@ -1,17 +1,9 @@
#pragma once #pragma once
#include <AggregateFunctions/IAggregateFunction.h> #include <AggregateFunctions/IAggregateFunction_fwd.h>
#include <Columns/IColumn.h> #include <Columns/IColumn.h>
#include <Common/PODArray.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <Common/PODArray.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <Functions/FunctionHelpers.h>
namespace DB namespace DB
{ {
@ -26,6 +18,12 @@ using ArenaPtr = std::shared_ptr<Arena>;
using ConstArenaPtr = std::shared_ptr<const Arena>; using ConstArenaPtr = std::shared_ptr<const Arena>;
using ConstArenas = std::vector<ConstArenaPtr>; using ConstArenas = std::vector<ConstArenaPtr>;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
struct ColumnWithTypeAndName;
using ColumnsWithTypeAndName = std::vector<ColumnWithTypeAndName>;
/** Column of states of aggregate functions. /** Column of states of aggregate functions.
* Presented as an array of pointers to the states of aggregate functions (data). * Presented as an array of pointers to the states of aggregate functions (data).
@ -121,7 +119,7 @@ public:
/// This method is made static and receive MutableColumnPtr object to explicitly destroy it. /// This method is made static and receive MutableColumnPtr object to explicitly destroy it.
static MutableColumnPtr convertToValues(MutableColumnPtr column); static MutableColumnPtr convertToValues(MutableColumnPtr column);
std::string getName() const override { return "AggregateFunction(" + func->getName() + ")"; } std::string getName() const override;
const char * getFamilyName() const override { return "AggregateFunction"; } const char * getFamilyName() const override { return "AggregateFunction"; }
TypeIndex getDataType() const override { return TypeIndex::AggregateFunction; } TypeIndex getDataType() const override { return TypeIndex::AggregateFunction; }

View File

@ -1,12 +1,10 @@
#pragma once #pragma once
#include <Columns/IColumn.h>
#include <Core/Field.h> #include <Core/Field.h>
#include <Core/Names.h> #include <Core/Names.h>
#include <Columns/IColumn.h>
#include <Common/PODArray.h>
#include <Common/HashTable/HashMap.h>
#include <DataTypes/Serializations/JSONDataParser.h>
#include <DataTypes/Serializations/SubcolumnsTree.h> #include <DataTypes/Serializations/SubcolumnsTree.h>
#include <Common/PODArray.h>
#include <DataTypes/IDataType.h> #include <DataTypes/IDataType.h>

View File

@ -346,7 +346,7 @@ ColumnPtr ColumnSparse::filter(const Filter & filt, ssize_t) const
} }
auto res_values = values->filter(values_filter, values_result_size_hint); auto res_values = values->filter(values_filter, values_result_size_hint);
return this->create(res_values, std::move(res_offsets), res_offset); return create(res_values, std::move(res_offsets), res_offset);
} }
void ColumnSparse::expand(const Filter & mask, bool inverted) void ColumnSparse::expand(const Filter & mask, bool inverted)

View File

@ -671,7 +671,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
ReadableSize(rss), ReadableSize(rss),
ReadableSize(difference)); ReadableSize(difference));
total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas); MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
} }
} }

View File

@ -2,10 +2,13 @@
#include <memory> #include <memory>
#include <base/types.h>
#include <Common/Logger.h> #include <Common/Logger.h>
#include <Common/SharedMutex.h>
#include <Common/SharedLockGuard.h> #include <Common/SharedLockGuard.h>
#include <Common/SharedMutex.h>
namespace DB
{
/** AtomicLogger allows to atomically change logger. /** AtomicLogger allows to atomically change logger.
* Standard library does not have atomic_shared_ptr, and we do not use std::atomic* operations, * Standard library does not have atomic_shared_ptr, and we do not use std::atomic* operations,
@ -49,3 +52,5 @@ private:
mutable DB::SharedMutex log_mutex; mutable DB::SharedMutex log_mutex;
LoggerPtr logger; LoggerPtr logger;
}; };
}

View File

@ -0,0 +1,16 @@
#include <Common/CurrentThread.h>
#include <Common/CurrentThreadHelpers.h>
namespace DB
{
bool currentThreadHasGroup()
{
return DB::CurrentThread::getGroup() != nullptr;
}
LogsLevel currentThreadLogsLevel()
{
return DB::CurrentThread::get().getClientLogsLevel();
}
}

View File

@ -0,0 +1,9 @@
#pragma once
#include <Core/LogsLevel.h>
namespace DB
{
bool currentThreadHasGroup();
LogsLevel currentThreadLogsLevel();
}

View File

@ -1,13 +1,15 @@
#include "DateLUT.h" #include "DateLUT.h"
#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
#include <Common/filesystemHelpers.h>
#include <Poco/DigestStream.h> #include <Poco/DigestStream.h>
#include <Poco/Exception.h> #include <Poco/Exception.h>
#include <Poco/SHA1Engine.h> #include <Poco/SHA1Engine.h>
#include <Common/filesystemHelpers.h>
#include <filesystem> #include <filesystem>
#include <fstream> #include <fstream>
#include <Interpreters/Context.h>
namespace namespace
@ -140,6 +142,38 @@ std::string determineDefaultTimeZone()
} }
const DateLUTImpl & DateLUT::instance()
{
const auto & date_lut = getInstance();
if (DB::CurrentThread::isInitialized())
{
std::string timezone_from_context;
const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();
if (query_context)
{
timezone_from_context = extractTimezoneFromContext(query_context);
if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}
/// On the server side, timezone is passed in query_context,
/// but on CH-client side we have no query context,
/// and each time we modify client's global context
const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
if (global_context)
{
timezone_from_context = extractTimezoneFromContext(global_context);
if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}
}
return serverTimezoneInstance();
}
DateLUT::DateLUT() DateLUT::DateLUT()
{ {
/// Initialize the pointer to the default DateLUTImpl. /// Initialize the pointer to the default DateLUTImpl.

View File

@ -1,17 +1,23 @@
#pragma once #pragma once
#include "DateLUTImpl.h"
#include <base/defines.h> #include <base/defines.h>
#include <base/types.h>
#include <boost/noncopyable.hpp> #include <boost/noncopyable.hpp>
#include "Common/CurrentThread.h"
#include <atomic> #include <atomic>
#include <memory> #include <memory>
#include <mutex> #include <mutex>
#include <unordered_map> #include <unordered_map>
namespace DB
{
class Context;
using ContextPtr = std::shared_ptr<const Context>;
}
class DateLUTImpl;
/// This class provides lazy initialization and lookup of singleton DateLUTImpl objects for a given timezone. /// This class provides lazy initialization and lookup of singleton DateLUTImpl objects for a given timezone.
class DateLUT : private boost::noncopyable class DateLUT : private boost::noncopyable
@ -20,38 +26,7 @@ public:
/// Return DateLUTImpl instance for session timezone. /// Return DateLUTImpl instance for session timezone.
/// session_timezone is a session-level setting. /// session_timezone is a session-level setting.
/// If setting is not set, returns the server timezone. /// If setting is not set, returns the server timezone.
static ALWAYS_INLINE const DateLUTImpl & instance() static const DateLUTImpl & instance();
{
const auto & date_lut = getInstance();
if (DB::CurrentThread::isInitialized())
{
std::string timezone_from_context;
const DB::ContextPtr query_context = DB::CurrentThread::get().getQueryContext();
if (query_context)
{
timezone_from_context = extractTimezoneFromContext(query_context);
if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}
/// On the server side, timezone is passed in query_context,
/// but on CH-client side we have no query context,
/// and each time we modify client's global context
const DB::ContextPtr global_context = DB::CurrentThread::get().getGlobalContext();
if (global_context)
{
timezone_from_context = extractTimezoneFromContext(global_context);
if (!timezone_from_context.empty())
return date_lut.getImplementation(timezone_from_context);
}
}
return serverTimezoneInstance();
}
static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone) static ALWAYS_INLINE const DateLUTImpl & instance(const std::string & time_zone)
{ {

View File

@ -1,8 +1,5 @@
#include "DateLUTImpl.h" #include <Core/DecimalFunctions.h>
#include <Common/DateLUTImpl.h>
#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>
#include <Common/Exception.h> #include <Common/Exception.h>
#include <algorithm> #include <algorithm>
@ -11,6 +8,10 @@
#include <cstring> #include <cstring>
#include <memory> #include <memory>
#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>
namespace DB namespace DB
{ {
@ -214,6 +215,29 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
} }
} }
unsigned int DateLUTImpl::toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const
{
constexpr Int64 millisecond_multiplier = 1'000;
constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;
auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);
if (datetime.value < 0 && components.fractional)
{
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
--components.whole;
}
Int64 fractional = components.fractional;
if (scale_multiplier > microsecond_multiplier)
fractional = fractional / (scale_multiplier / microsecond_multiplier);
else if (scale_multiplier < microsecond_multiplier)
fractional = fractional * (microsecond_multiplier / scale_multiplier);
UInt16 millisecond = static_cast<UInt16>(fractional / divider);
return millisecond;
}
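toMillisecond() splits the DateTime64 into whole seconds and a fractional part expressed at the column's own scale; for negative values the split is normalized so the fraction always counts forward from the previous whole second. The fraction is then rescaled to microseconds (dividing when the scale is finer than 10^-6 s, multiplying when it is coarser) and finally divided by 1'000 to obtain milliseconds. A standalone walk-through of the non-negative path with concrete numbers, using plain integers instead of the ClickHouse decimal helpers:

#include <cassert>
#include <cstdint>

/// Example: DateTime64(3) => scale_multiplier = 1'000; the stored value 1'234'567
/// means 1234.567 s, so the expected millisecond component is 567.
int main()
{
    constexpr int64_t millisecond_multiplier = 1'000;
    constexpr int64_t microsecond_multiplier = 1'000 * millisecond_multiplier;

    const int64_t scale_multiplier = 1'000;           /// DateTime64(3)
    const int64_t value = 1'234'567;                  /// 1234.567 seconds

    const int64_t whole = value / scale_multiplier;   /// 1234 whole seconds
    int64_t fractional = value % scale_multiplier;    /// 567, in units of 1/scale_multiplier s

    /// Bring the fractional part to microsecond resolution ...
    if (scale_multiplier > microsecond_multiplier)
        fractional = fractional / (scale_multiplier / microsecond_multiplier);
    else if (scale_multiplier < microsecond_multiplier)
        fractional = fractional * (microsecond_multiplier / scale_multiplier);   /// 567 * 1'000 = 567'000

    /// ... and cut it down to milliseconds.
    const int64_t millisecond = fractional / (microsecond_multiplier / millisecond_multiplier);

    assert(whole == 1234);
    assert(millisecond == 567);
    return 0;
}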
/// Prefer to load timezones from blobs linked to the binary. /// Prefer to load timezones from blobs linked to the binary.
/// The blobs are provided by "tzdata" library. /// The blobs are provided by "tzdata" library.

View File

@ -3,7 +3,6 @@
#include <base/DayNum.h> #include <base/DayNum.h>
#include <base/defines.h> #include <base/defines.h>
#include <base/types.h> #include <base/types.h>
#include <Core/DecimalFunctions.h>
#include <ctime> #include <ctime>
#include <cassert> #include <cassert>
@ -50,6 +49,11 @@ enum class WeekDayMode
WeekStartsSunday1 = 3 WeekStartsSunday1 = 3
}; };
namespace DB
{
class DateTime64;
}
/** Lookup table to conversion of time to date, and to month / year / day of week / day of month and so on. /** Lookup table to conversion of time to date, and to month / year / day of week / day of month and so on.
* First time was implemented for OLAPServer, that needed to do billions of such transformations. * First time was implemented for OLAPServer, that needed to do billions of such transformations.
*/ */
@ -593,29 +597,7 @@ public:
return time % 60; return time % 60;
} }
template <typename DateOrTime> unsigned toMillisecond(const DB::DateTime64 & datetime, Int64 scale_multiplier) const;
unsigned toMillisecond(const DateOrTime & datetime, Int64 scale_multiplier) const
{
constexpr Int64 millisecond_multiplier = 1'000;
constexpr Int64 microsecond_multiplier = 1'000 * millisecond_multiplier;
constexpr Int64 divider = microsecond_multiplier / millisecond_multiplier;
auto components = DB::DecimalUtils::splitWithScaleMultiplier(datetime, scale_multiplier);
if (datetime.value < 0 && components.fractional)
{
components.fractional = scale_multiplier + (components.whole ? Int64(-1) : Int64(1)) * components.fractional;
--components.whole;
}
Int64 fractional = components.fractional;
if (scale_multiplier > microsecond_multiplier)
fractional = fractional / (scale_multiplier / microsecond_multiplier);
else if (scale_multiplier < microsecond_multiplier)
fractional = fractional * (microsecond_multiplier / scale_multiplier);
UInt16 millisecond = static_cast<UInt16>(fractional / divider);
return millisecond;
}
unsigned toMinute(Time t) const unsigned toMinute(Time t) const
{ {

View File

@ -1,26 +1,27 @@
#include "Exception.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>
#include <IO/Operators.h> #include <IO/Operators.h>
#include <IO/ReadBufferFromFile.h> #include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h> #include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h> #include <IO/WriteHelpers.h>
#include <base/demangle.h> #include <base/demangle.h>
#include <Poco/String.h> #include <Common/AtomicLogger.h>
#include <Common/ErrorCodes.h> #include <Common/ErrorCodes.h>
#include <Common/Exception.h>
#include <Common/LockMemoryExceptionInThread.h> #include <Common/LockMemoryExceptionInThread.h>
#include <Common/MemorySanitizer.h> #include <Common/MemorySanitizer.h>
#include <Common/SensitiveDataMasker.h> #include <Common/SensitiveDataMasker.h>
#include <Common/config_version.h>
#include <Common/filesystemHelpers.h> #include <Common/filesystemHelpers.h>
#include <Common/formatReadable.h> #include <Common/formatReadable.h>
#include <Common/logger_useful.h> #include <Common/logger_useful.h>
#include <Common/config_version.h> #include <algorithm>
#include <cstdlib>
#include <cstring>
#include <filesystem>
#include <cxxabi.h>
#include <Poco/String.h>
namespace fs = std::filesystem; namespace fs = std::filesystem;

View File

@ -1,22 +1,20 @@
#pragma once #pragma once
#include <cerrno>
#include <exception>
#include <vector>
#include <memory>
#include <Poco/Exception.h>
#include <base/defines.h> #include <base/defines.h>
#include <base/errnoToString.h> #include <base/errnoToString.h>
#include <base/int8_to_string.h> #include <base/int8_to_string.h>
#include <base/scope_guard.h> #include <base/scope_guard.h>
#include <Common/AtomicLogger.h>
#include <Common/Logger.h> #include <Common/Logger.h>
#include <Common/LoggingFormatStringHelpers.h> #include <Common/LoggingFormatStringHelpers.h>
#include <Common/StackTrace.h> #include <Common/StackTrace.h>
#include <cerrno>
#include <exception>
#include <memory>
#include <vector>
#include <fmt/format.h> #include <fmt/format.h>
#include <Poco/Exception.h>
namespace Poco { class Logger; } namespace Poco { class Logger; }
@ -24,6 +22,8 @@ namespace Poco { class Logger; }
namespace DB namespace DB
{ {
class AtomicLogger;
[[noreturn]] void abortOnFailedAssertion(const String & description); [[noreturn]] void abortOnFailedAssertion(const String & description);
/// This flag can be set for testing purposes - to check that no exceptions are thrown. /// This flag can be set for testing purposes - to check that no exceptions are thrown.

View File

@ -10,6 +10,8 @@
#include <IO/ReadHelpers.h> #include <IO/ReadHelpers.h>
#include <base/JSON.h> #include <base/JSON.h>
#include <boost/range/adaptor/map.hpp>
namespace fs = std::filesystem; namespace fs = std::filesystem;

View File

@ -203,7 +203,7 @@ public:
if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until) if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until)
{ {
LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit); LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit);
mute_warning_until = roundUp(total_connections_in_group, limits.warning_step); mute_warning_until = roundUp(total_connections_in_group, HTTPConnectionPools::Limits::warning_step);
} }
} }
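The warning above fires once the group reaches the warning limit and is then muted until the connection count crosses the next multiple of warning_step. A standalone sketch of that throttling, with a hypothetical roundUpToMultiple helper standing in for roundUp (its exact rounding semantics are an assumption here):

#include <cstddef>
#include <iostream>

/// Assumed semantics, for illustration only: the next multiple of `step`
/// that is strictly greater than `value`.
static size_t roundUpToMultiple(size_t value, size_t step)
{
    return (value / step + 1) * step;
}

int main()
{
    const size_t warning_limit = 100;   /// stands in for limits.warning_limit
    const size_t warning_step = 50;     /// stands in for HTTPConnectionPools::Limits::warning_step
    size_t mute_warning_until = 0;

    for (size_t total_connections_in_group = 90; total_connections_in_group <= 210; total_connections_in_group += 10)
    {
        if (total_connections_in_group >= warning_limit && total_connections_in_group >= mute_warning_until)
        {
            std::cout << "warning at " << total_connections_in_group << " connections\n";
            mute_warning_until = roundUpToMultiple(total_connections_in_group, warning_step);
        }
    }
    /// Warns at 100, 150 and 200; the counts in between stay muted.
    return 0;
}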
@ -295,8 +295,13 @@ private:
String getTarget() const String getTarget() const
{ {
if (!Session::getProxyConfig().host.empty()) if (!Session::getProxyConfig().host.empty())
return fmt::format("{} over proxy {}", Session::getHost(), Session::getProxyConfig().host); return fmt::format("{}:{} over proxy {}",
return Session::getHost(); Session::getHost(),
Session::getPort(),
Session::getProxyConfig().host);
return fmt::format("{}:{}",
Session::getHost(),
Session::getPort());
} }
void flushRequest() override void flushRequest() override
@ -472,7 +477,8 @@ public:
String getTarget() const String getTarget() const
{ {
if (!proxy_configuration.isEmpty()) if (!proxy_configuration.isEmpty())
return fmt::format("{} over proxy {}", host, proxy_configuration.host); return fmt::format("{} over proxy {}",
host, proxy_configuration.host);
return host; return host;
} }

View File

@ -207,7 +207,7 @@ public:
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func) void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
{ {
DB::PrefetchingHelper prefetching; DB::PrefetchingHelper prefetching;
size_t prefetch_look_ahead = prefetching.getInitialLookAheadValue(); size_t prefetch_look_ahead = DB::PrefetchingHelper::getInitialLookAheadValue();
size_t i = 0; size_t i = 0;
auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead); auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead);
@ -216,10 +216,10 @@ public:
{ {
if constexpr (prefetch) if constexpr (prefetch)
{ {
if (i == prefetching.iterationsToMeasure()) if (i == DB::PrefetchingHelper::iterationsToMeasure())
{ {
prefetch_look_ahead = prefetching.calcPrefetchLookAhead(); prefetch_look_ahead = prefetching.calcPrefetchLookAhead();
prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - prefetching.getInitialLookAheadValue()); prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - DB::PrefetchingHelper::getInitialLookAheadValue());
} }
if (prefetch_it != end) if (prefetch_it != end)

View File

@ -2,6 +2,7 @@
#if USE_JEMALLOC #if USE_JEMALLOC
#include <Common/Exception.h>
#include <Common/Stopwatch.h> #include <Common/Stopwatch.h>
#include <Common/logger_useful.h> #include <Common/logger_useful.h>
#include <jemalloc/jemalloc.h> #include <jemalloc/jemalloc.h>

View File

@ -1,9 +1,10 @@
#pragma once #pragma once
#include <cstring> #include <cstring>
#include <string>
#include <exception> #include <exception>
#include <string>
#include <Common/DateLUT.h> #include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>
/** Stores a calendar date in broken-down form (year, month, day-in-month). /** Stores a calendar date in broken-down form (year, month, day-in-month).

View File

@ -1,15 +1,20 @@
#pragma once #pragma once
#include <memory>
#include <base/defines.h> #include <base/defines.h>
#include <Poco/Channel.h> #include <memory>
#include <Poco/Logger.h> #include <Poco/Logger.h>
#include <Poco/Message.h> #include <Poco/Message.h>
using LoggerPtr = Poco::LoggerPtr;
namespace Poco
{
class Channel;
class Logger;
using LoggerPtr = std::shared_ptr<Logger>;
}
using LoggerPtr = std::shared_ptr<Poco::Logger>;
using LoggerRawPtr = Poco::Logger *; using LoggerRawPtr = Poco::Logger *;
/** RAII wrappers around Poco/Logger.h. /** RAII wrappers around Poco/Logger.h.

Some files were not shown because too many files have changed in this diff