commit 357e5e3ae6
Merge remote-tracking branch 'origin/master' into use-iobject-storage-for-table-engines-1
@@ -96,7 +96,6 @@ Checks: [
     '-modernize-use-default-member-init',
     '-modernize-use-emplace',
     '-modernize-use-nodiscard',
-    '-modernize-use-override',
     '-modernize-use-trailing-return-type',

     '-performance-inefficient-string-concatenation',
contrib/NuRaft (vendored submodule, 2 changes)
@@ -1 +1 @@
-Subproject commit 08ac76ea80a37f89b12109c805eafe9f1dc9b991
+Subproject commit cb5dc3c906e80f253e9ce9535807caef827cc2e0
@@ -51,6 +51,12 @@ else()
     target_compile_definitions(_nuraft PRIVATE USE_BOOST_ASIO=1 BOOST_ASIO_STANDALONE=1)
 endif()

+target_link_libraries (_nuraft PRIVATE clickhouse_common_io)
+# We must have it PUBLIC here because some headers which depend on it directly
+# included in clickhouse
+target_compile_definitions(_nuraft PUBLIC USE_CLICKHOUSE_THREADS=1)
+MESSAGE(STATUS "Will use clickhouse threads for NuRaft")
+
 target_include_directories (_nuraft SYSTEM PRIVATE "${LIBRARY_DIR}/include/libnuraft")
 # for some reason include "asio.h" directly without "boost/" prefix.
 target_include_directories (_nuraft SYSTEM PRIVATE "${ClickHouse_SOURCE_DIR}/contrib/boost/boost")
@@ -16,6 +16,8 @@ ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

 dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
+dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
+dpkg -i package_folder/clickhouse-library-bridge_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
 dpkg -i package_folder/clickhouse-client_*.deb

@@ -79,7 +79,7 @@ The supported formats are:
 | [RowBinary](#rowbinary) | ✔ | ✔ |
 | [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
 | [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
-| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
+| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✗ |
 | [Native](#native) | ✔ | ✔ |
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
@@ -1487,7 +1487,7 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000
 - [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
 - [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
 - [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
-- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
+- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `true`.

 ## RowBinary {#rowbinary}

@@ -2465,7 +2465,7 @@ Result:

 ## Npy {#data-format-npy}

 This function is designed to load a NumPy array from a .npy file into ClickHouse. The NumPy file format is a binary format used for efficiently storing arrays of numerical data. During import, ClickHouse treats top level dimension as an array of rows with single column. Supported Npy data types and their corresponding type in ClickHouse:

 | Npy type | ClickHouse type |
 |:--------:|:---------------:|
 | b1       | UInt8           |
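For illustration, reading such a file goes through the `file` table function with the `Npy` format (a minimal sketch; the path `example_array.npy` is hypothetical):

```sql
-- Assumes example_array.npy is placed in the server's user_files directory.
SELECT * FROM file('example_array.npy', Npy);
```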
@@ -436,7 +436,7 @@ Default: 0
 Restriction on dropping partitions.

 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.
-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
 Default value: 50 GB.
 The value 0 means that you can drop partitions without any restrictions.

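For illustration, the query this limit gates looks as follows (a sketch; the table and partition names are hypothetical):

```sql
-- Throws if the partition exceeds max_partition_size_to_drop (50 GB by default);
-- per the docs above, creating <clickhouse-path>/flags/force_drop_table disables the restriction.
ALTER TABLE hits DROP PARTITION '2024-03-01';
```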
@@ -518,7 +518,7 @@ Restriction on deleting tables.

 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

 Default value: 50 GB.
 The value 0 means that you can delete all tables without any restrictions.
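A quick way to inspect both limits on a running server (a sketch; assumes a ClickHouse version that exposes the `system.server_settings` table):

```sql
SELECT name, value
FROM system.server_settings
WHERE name IN ('max_table_size_to_drop', 'max_partition_size_to_drop');
```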
@@ -1570,7 +1570,7 @@ Restriction on deleting tables.

 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_table_size_to_drop` (in bytes), you can’t delete it using a [DROP](../../sql-reference/statements/drop.md) query or [TRUNCATE](../../sql-reference/statements/truncate.md) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

 Default value: 50 GB.

@@ -1588,7 +1588,7 @@ Restriction on dropping partitions.

 If the size of a [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table exceeds `max_partition_size_to_drop` (in bytes), you can’t drop a partition using a [DROP PARTITION](../../sql-reference/statements/alter/partition.md#drop-partitionpart) query.

-This setting does not require a restart of the Clickhouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.
+This setting does not require a restart of the ClickHouse server to apply. Another way to disable the restriction is to create the `<clickhouse-path>/flags/force_drop_table` file.

 Default value: 50 GB.

@@ -1642,7 +1642,7 @@ Possible values:
 - 0 — Output without row numbers.
 - 1 — Output with row numbers.

-Default value: `0`.
+Default value: `1`.

 **Example**

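A minimal sketch of the setting in action (illustrative, not part of this diff):

```sql
SELECT number
FROM numbers(3)
SETTINGS output_format_pretty_row_numbers = 1
FORMAT Pretty;
```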
@@ -190,22 +190,67 @@ SELECT toTypeName(variantType(v)) FROM test LIMIT 1;
 └─────────────────────────────────────────────────────────────────────┘
 ```

-## Conversion between Variant column and other columns
+## Conversion between a Variant column and other columns

-There are 3 possible conversions that can be performed with Variant column.
+There are 4 possible conversions that can be performed with a column of type `Variant`.

-### Converting an ordinary column to a Variant column
+### Converting a String column to a Variant column

-It is possible to convert ordinary column with type `T` to a `Variant` column containing this type:
+Conversion from `String` to `Variant` is performed by parsing a value of `Variant` type from the string value:

 ```sql
-SELECT toTypeName(variant) as type_name, 'Hello, World!'::Variant(UInt64, String, Array(UInt64)) as variant;
+SELECT '42'::Variant(String, UInt64) as variant, variantType(variant) as variant_type
 ```

 ```text
-┌─type_name──────────────────────────────┬─variant───────┐
-│ Variant(Array(UInt64), String, UInt64) │ Hello, World! │
-└────────────────────────────────────────┴───────────────┘
+┌─variant─┬─variant_type─┐
+│ 42      │ UInt64       │
+└─────────┴──────────────┘
 ```
+
+```sql
+SELECT '[1, 2, 3]'::Variant(String, Array(UInt64)) as variant, variantType(variant) as variant_type
+```
+
+```text
+┌─variant─┬─variant_type──┐
+│ [1,2,3] │ Array(UInt64) │
+└─────────┴───────────────┘
+```
+
+```sql
+SELECT CAST(map('key1', '42', 'key2', 'true', 'key3', '2020-01-01'), 'Map(String, Variant(UInt64, Bool, Date))') as map_of_variants, mapApply((k, v) -> (k, variantType(v)), map_of_variants) as map_of_variant_types
+```
+
+```text
+┌─map_of_variants─────────────────────────────┬─map_of_variant_types──────────────────────────┐
+│ {'key1':42,'key2':true,'key3':'2020-01-01'} │ {'key1':'UInt64','key2':'Bool','key3':'Date'} │
+└─────────────────────────────────────────────┴───────────────────────────────────────────────┘
+```
+
+### Converting an ordinary column to a Variant column
+
+It is possible to convert an ordinary column with type `T` to a `Variant` column containing this type:
+
+```sql
+SELECT toTypeName(variant) as type_name, [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) as variant, variantType(variant) as variant_name
+```
+
+```text
+┌─type_name──────────────────────────────┬─variant─┬─variant_name──┐
+│ Variant(Array(UInt64), String, UInt64) │ [1,2,3] │ Array(UInt64) │
+└────────────────────────────────────────┴─────────┴───────────────┘
+```
+
+Note: converting from `String` type is always performed through parsing; if you need to convert a `String` column to the `String` variant of a `Variant` without parsing, you can do the following:
+
+```sql
+SELECT '[1, 2, 3]'::Variant(String)::Variant(String, Array(UInt64), UInt64) as variant, variantType(variant) as variant_type
+```
+
+```text
+┌─variant───┬─variant_type─┐
+│ [1, 2, 3] │ String       │
+└───────────┴──────────────┘
+```

 ### Converting a Variant column to an ordinary column

@@ -2776,7 +2776,7 @@ SELECT range(number) FROM system.numbers LIMIT 5 FORMAT PrettyCompactNoEscapes;
 - 0 — Row numbers are not output.
 - 1 — Row numbers are output.

-Default value: `0`.
+Default value: `1`.

 **Example**

@@ -2798,7 +2798,7 @@ SELECT TOP 3 name, value FROM system.settings;
 ```
 ### output_format_pretty_color {#output_format_pretty_color}

 Enables/disables ANSI escape sequences in Pretty formats.

 Possible values:

@@ -4123,7 +4123,7 @@ SELECT sum(number) FROM numbers(10000000000) SETTINGS partial_result_on_first_ca
 ## session_timezone {#session_timezone}

 Sets the default timezone (session_timezone) for the current session, instead of the [server timezone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone). That is, all DateTime/DateTime64 values for which no timezone is set explicitly are interpreted as belonging to the specified zone.
 If the setting value is `''` (an empty string), it matches the server timezone.

 The `timeZone()` and `serverTimezone()` functions return the timezone of the current session and of the server, respectively.

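An illustrative session for the setting above (a sketch; the timezone name is chosen arbitrarily):

```sql
SET session_timezone = 'Asia/Novosibirsk';
SELECT timeZone(), serverTimezone(); -- session timezone vs. server timezone
```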
@@ -30,10 +30,6 @@ conflicts:
 contents:
 - src: root/usr/lib/debug/usr/bin/clickhouse.debug
   dst: /usr/lib/debug/usr/bin/clickhouse.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-odbc-bridge.debug
-- src: root/usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
-  dst: /usr/lib/debug/usr/bin/clickhouse-library-bridge.debug
 # docs
 - src: ../AUTHORS
   dst: /usr/share/doc/clickhouse-common-static-dbg/AUTHORS
@@ -36,10 +36,6 @@ contents:
   dst: /usr/bin/clickhouse
 - src: root/usr/bin/clickhouse-extract-from-config
   dst: /usr/bin/clickhouse-extract-from-config
-- src: root/usr/bin/clickhouse-library-bridge
-  dst: /usr/bin/clickhouse-library-bridge
-- src: root/usr/bin/clickhouse-odbc-bridge
-  dst: /usr/bin/clickhouse-odbc-bridge
 - src: root/usr/share/bash-completion/completions
   dst: /usr/share/bash-completion/completions
 - src: root/usr/share/clickhouse
packages/clickhouse-library-bridge.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+# package sources should be placed in ${PWD}/root
+# nfpm should run from the same directory with a config
+name: "clickhouse-library-bridge"
+description: |
+  ClickHouse Library Bridge - is a separate process for loading libraries for the 'library' dictionary sources and the CatBoost library.
+  ClickHouse is a column-oriented database management system
+  that allows generating analytical data reports in real time.
+
+# Common packages config
+arch: "${DEB_ARCH}" # amd64, arm64
+platform: "linux"
+version: "${CLICKHOUSE_VERSION_STRING}"
+vendor: "ClickHouse Inc."
+homepage: "https://clickhouse.com"
+license: "Apache"
+section: "database"
+priority: "optional"
+maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
+deb:
+  fields:
+    Source: clickhouse
+
+# Package specific content
+contents:
+- src: root/usr/bin/clickhouse-library-bridge
+  dst: /usr/bin/clickhouse-library-bridge
+# docs
+- src: ../AUTHORS
+  dst: /usr/share/doc/clickhouse-library-bridge/AUTHORS
+- src: ../CHANGELOG.md
+  dst: /usr/share/doc/clickhouse-library-bridge/CHANGELOG.md
+- src: ../LICENSE
+  dst: /usr/share/doc/clickhouse-library-bridge/LICENSE
+- src: ../README.md
+  dst: /usr/share/doc/clickhouse-library-bridge/README.md
packages/clickhouse-odbc-bridge.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+# package sources should be placed in ${PWD}/root
+# nfpm should run from the same directory with a config
+name: "clickhouse-odbc-bridge"
+description: |
+  ClickHouse ODBC Bridge - is a separate process for loading ODBC drivers and interacting with external databases using the ODBC protocol.
+  ClickHouse is a column-oriented database management system
+  that allows generating analytical data reports in real time.
+
+# Common packages config
+arch: "${DEB_ARCH}" # amd64, arm64
+platform: "linux"
+version: "${CLICKHOUSE_VERSION_STRING}"
+vendor: "ClickHouse Inc."
+homepage: "https://clickhouse.com"
+license: "Apache"
+section: "database"
+priority: "optional"
+maintainer: "ClickHouse Dev Team <packages+linux@clickhouse.com>"
+deb:
+  fields:
+    Source: clickhouse
+
+# Package specific content
+contents:
+- src: root/usr/bin/clickhouse-odbc-bridge
+  dst: /usr/bin/clickhouse-odbc-bridge
+# docs
+- src: ../AUTHORS
+  dst: /usr/share/doc/clickhouse-odbc-bridge/AUTHORS
+- src: ../CHANGELOG.md
+  dst: /usr/share/doc/clickhouse-odbc-bridge/CHANGELOG.md
+- src: ../LICENSE
+  dst: /usr/share/doc/clickhouse-odbc-bridge/LICENSE
+- src: ../README.md
+  dst: /usr/share/doc/clickhouse-odbc-bridge/README.md
@@ -237,7 +237,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
             ASTPtr res = parseQueryAndMovePosition(
                 parser, pos, end, "query", multiple, cmd_settings.max_query_size, cmd_settings.max_parser_depth, cmd_settings.max_parser_backtracks);

-            std::unique_ptr<ReadBuffer> insert_query_payload = nullptr;
+            std::unique_ptr<ReadBuffer> insert_query_payload;
             /// If the query is INSERT ... VALUES, then we will try to parse the data.
             if (auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
             {
@@ -24,9 +24,4 @@ target_link_libraries(clickhouse-library-bridge PRIVATE

 set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)

-if (SPLIT_DEBUG_SYMBOLS)
-    clickhouse_split_debug_symbols(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-library-bridge)
-else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
-    install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-endif()
+install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
@@ -35,7 +35,7 @@ public:
     ExternalDictionaryLibraryAPI::CStrings strings; // will pass pointer to lib

 private:
-    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder = nullptr;
+    std::unique_ptr<ExternalDictionaryLibraryAPI::CString[]> ptr_holder;
     Container strings_holder;
 };

@@ -30,12 +30,7 @@ target_link_libraries(clickhouse-odbc-bridge PRIVATE
 set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)
 target_compile_options (clickhouse-odbc-bridge PRIVATE -Wno-reserved-id-macro -Wno-keyword-macro)

-if (SPLIT_DEBUG_SYMBOLS)
-    clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge)
-else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
-    install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-endif()
+install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)

 if(ENABLE_TESTS)
     add_subdirectory(tests)
@@ -12,6 +12,7 @@
 #include <Poco/Net/HTTPServerRequest.h>
 #include <Poco/Net/HTTPServerResponse.h>
 #include <Poco/NumberParser.h>
+#include <Interpreters/Context.h>
 #include <Common/logger_useful.h>
 #include <Common/BridgeProtocolVersion.h>
 #include <Common/quoteString.h>
@@ -5,7 +5,6 @@
 #if USE_ODBC

-#include <Interpreters/Context_fwd.h>
 #include <Interpreters/Context.h>
 #include <Server/HTTP/HTTPRequestHandler.h>
 #include <Poco/Logger.h>

@@ -96,7 +96,7 @@
         <to>https://{bucket}.s3.amazonaws.com</to>
     </s3>
     <gs>
-        <to>https://{bucket}.storage.googleapis.com</to>
+        <to>https://storage.googleapis.com/{bucket}</to>
     </gs>
     <oss>
         <to>https://{bucket}.oss.aliyuncs.com</to>
@@ -115,34 +115,34 @@ public:

     void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
     {
-        this->data(place).add(*columns[0], row_num, arena);
+        data(place).add(*columns[0], row_num, arena);
     }

     void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
     {
-        this->data(place).addManyDefaults(*columns[0], 0, arena);
+        data(place).addManyDefaults(*columns[0], 0, arena);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        this->data(place).add(this->data(rhs), arena);
+        data(place).add(data(rhs), arena);
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).write(buf, *serialization);
+        data(place).write(buf, *serialization);
     }

     void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
-        this->data(place).read(buf, *serialization, arena);
+        data(place).read(buf, *serialization, arena);
     }

     bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        this->data(place).insertResultInto(to);
+        data(place).insertResultInto(to);
     }
 };

@@ -559,7 +559,7 @@ public:
             ptr = ptrs[row_num];
         }

-        this->data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
+        data(place).add(ptr, allocated, trace_values.data() + prev_offset, trace_size, arena);
     }

     void addManyDefaults(
@@ -572,7 +572,7 @@ public:

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        this->data(place).merge(this->data(rhs), arena);
+        data(place).merge(data(rhs), arena);
     }

     void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t> /* version */) const override
@@ -590,7 +590,7 @@ public:
         auto & array = assert_cast<ColumnArray &>(to);
         auto & str = assert_cast<ColumnString &>(array.getData());

-        this->data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);
+        data(place).dumpFlameGraph(str.getChars(), str.getOffsets(), 0, 0);

         array.getOffsets().push_back(str.size());
     }

@@ -89,10 +89,10 @@ struct GroupArraySamplerData
         chassert(lim != 0);

         /// With a large number of values, we will generate random numbers several times slower.
-        if (lim <= static_cast<UInt64>(rng.max()))
+        if (lim <= static_cast<UInt64>(pcg32_fast::max()))
             return rng() % lim;
         else
-            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
+            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
     }

     void randomShuffle()
@@ -242,7 +242,7 @@ public:
     {
         Float64 x = getFloat64DataFromColumn(columns[0], row_num, this->x_type);
         Float64 y = getFloat64DataFromColumn(columns[1], row_num, this->y_type);
-        this->data(place).add(x, y, arena);
+        data(place).add(x, y, arena);
     }

     Float64 getFloat64DataFromColumn(const IColumn * column, size_t row_num, TypeIndex type_index) const
@@ -264,25 +264,25 @@ public:

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

         a.merge(b, arena);
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).write(buf);
+        data(place).write(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
     {
-        auto res = this->data(place).getResult(total_buckets, arena);
+        auto res = data(place).getResult(total_buckets, arena);

         auto & col = assert_cast<ColumnArray &>(to);
         auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
@@ -205,35 +205,35 @@ public:
         UInt8 is_second = columns[1]->getUInt(row_num);

         if (is_second)
-            this->data(place).addY(value, arena);
+            data(place).addY(value, arena);
         else
-            this->data(place).addX(value, arena);
+            data(place).addX(value, arena);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

         a.merge(b, arena);
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).write(buf);
+        data(place).write(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        if (!this->data(place).size_x || !this->data(place).size_y)
+        if (!data(place).size_x || !data(place).size_y)
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} require both samples to be non empty", getName());

-        auto [u_statistic, p_value] = this->data(place).getResult(alternative, continuity_correction);
+        auto [u_statistic, p_value] = data(place).getResult(alternative, continuity_correction);

         /// Because p-value is a probability.
         p_value = std::min(1.0, std::max(0.0, p_value));

@@ -66,31 +66,31 @@ public:
     {
         Float64 new_x = columns[0]->getFloat64(row_num);
         Float64 new_y = columns[1]->getFloat64(row_num);
-        this->data(place).addX(new_x, arena);
-        this->data(place).addY(new_y, arena);
+        data(place).addX(new_x, arena);
+        data(place).addY(new_y, arena);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        auto & a = this->data(place);
-        const auto & b = this->data(rhs);
+        auto & a = data(place);
+        const auto & b = data(rhs);

         a.merge(b, arena);
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).write(buf);
+        data(place).write(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
-        this->data(place).read(buf, arena);
+        data(place).read(buf, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        auto answer = this->data(place).getResult();
+        auto answer = data(place).getResult();

         auto & column = static_cast<ColumnVector<Float64> &>(to);
         column.getData().push_back(answer);

@@ -102,24 +102,24 @@ public:
             auto event = assert_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
             if (event)
             {
-                this->data(place).add(i);
+                data(place).add(i);
             }
         }
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
     {
-        this->data(place).merge(this->data(rhs));
+        data(place).merge(data(rhs));
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -130,13 +130,13 @@ public:
         ColumnArray::Offset current_offset = data_to.size();
         data_to.resize(current_offset + events_size);

-        const bool first_flag = this->data(place).events.test(0);
+        const bool first_flag = data(place).events.test(0);
         data_to[current_offset] = first_flag;
         ++current_offset;

         for (size_t i = 1; i < events_size; ++i)
         {
-            data_to[current_offset] = (first_flag && this->data(place).events.test(i));
+            data_to[current_offset] = (first_flag && data(place).events.test(i));
             ++current_offset;
         }

@@ -123,22 +123,22 @@ public:
         Float64 x = columns[0]->getFloat64(row_num);
         Float64 y = columns[1]->getFloat64(row_num);

-        this->data(place).add(x, y);
+        data(place).add(x, y);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
     {
-        this->data(place).merge(this->data(rhs));
+        data(place).merge(data(rhs));
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
     }

     static DataTypePtr createResultType()
@@ -168,8 +168,8 @@ public:
         IColumn & to,
         Arena *) const override
     {
-        Float64 k = this->data(place).getK();
-        Float64 b = this->data(place).getB(k);
+        Float64 k = data(place).getK();
+        Float64 b = data(place).getB(k);

         auto & col_tuple = assert_cast<ColumnTuple &>(to);
         auto & col_k = assert_cast<ColumnVector<Float64> &>(col_tuple.getColumn(0));
@@ -120,7 +120,7 @@ public:

     void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena * arena) const override
     {
-        this->data(place).add(*columns[0], row_num, arena);
+        data(place).add(*columns[0], row_num, arena);
     }

     void addBatchSinglePlace(
@@ -131,7 +131,7 @@ public:
         Arena * arena,
         ssize_t if_argument_pos) const override
     {
-        if (this->data(place).isNull())
+        if (data(place).isNull())
             return;
         IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlace(
             row_begin, row_end, place, columns, arena, if_argument_pos);
@@ -146,7 +146,7 @@ public:
         Arena * arena,
         ssize_t if_argument_pos) const override
     {
-        if (this->data(place).isNull())
+        if (data(place).isNull())
             return;
         IAggregateFunctionDataHelper<Data, AggregateFunctionSingleValueOrNull>::addBatchSinglePlaceNotNull(
             row_begin, row_end, place, columns, null_map, arena, if_argument_pos);
@@ -154,29 +154,29 @@ public:

     void addManyDefaults(AggregateDataPtr __restrict place, const IColumn ** columns, size_t, Arena * arena) const override
     {
-        this->data(place).add(*columns[0], 0, arena);
+        data(place).add(*columns[0], 0, arena);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        this->data(place).add(this->data(rhs), arena);
+        data(place).add(data(rhs), arena);
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).write(buf, *serialization);
+        data(place).write(buf, *serialization);
     }

     void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
-        this->data(place).read(buf, *serialization, arena);
+        data(place).read(buf, *serialization, arena);
     }

     bool allocatesMemoryInArena() const override { return singleValueTypeAllocatesMemoryInArena(value_type_index); }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
-        this->data(place).insertResultInto(to);
+        data(place).insertResultInto(to);
     }
 };

@@ -150,13 +150,13 @@ private:

     Float64 getResult(ConstAggregateDataPtr __restrict place) const
     {
-        const auto & data = this->data(place);
+        const auto & dt = data(place);
         switch (kind)
         {
-            case VarKind::varSampStable: return getVarSamp(data.m2, data.count);
-            case VarKind::stddevSampStable: return getStddevSamp(data.m2, data.count);
-            case VarKind::varPopStable: return getVarPop(data.m2, data.count);
-            case VarKind::stddevPopStable: return getStddevPop(data.m2, data.count);
+            case VarKind::varSampStable: return getVarSamp(dt.m2, dt.count);
+            case VarKind::stddevSampStable: return getStddevSamp(dt.m2, dt.count);
+            case VarKind::varPopStable: return getVarPop(dt.m2, dt.count);
+            case VarKind::stddevPopStable: return getStddevPop(dt.m2, dt.count);
         }
     }

@@ -182,22 +182,22 @@ public:

     void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
     {
-        this->data(place).update(*columns[0], row_num);
+        data(place).update(*columns[0], row_num);
     }

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override
     {
-        this->data(place).mergeWith(this->data(rhs));
+        data(place).mergeWith(data(rhs));
     }

     void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
-        this->data(place).serialize(buf);
+        data(place).serialize(buf);
     }

     void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
-        this->data(place).deserialize(buf);
+        data(place).deserialize(buf);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@@ -491,7 +491,7 @@ public:
         std::vector<const UInt8 *> nullable_filters;
         const IColumn * nested_columns[number_of_arguments];

-        std::unique_ptr<UInt8[]> final_flags = nullptr;
+        std::unique_ptr<UInt8[]> final_flags;
         const UInt8 * final_flags_ptr = nullptr;

         if (if_argument_pos >= 0)
@@ -258,10 +258,10 @@ private:
         chassert(limit > 0);

         /// With a large number of values, we will generate random numbers several times slower.
-        if (limit <= static_cast<UInt64>(rng.max()))
+        if (limit <= static_cast<UInt64>(pcg32_fast::max()))
             return rng() % limit;
         else
-            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(rng.max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
+            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32_fast::max()) + 1ULL) + static_cast<UInt64>(rng())) % limit;
     }

     void sortIfNeeded()
@@ -579,7 +579,7 @@ std::optional<size_t> SingleValueDataFixed<T>::getGreatestIndexNotNullIf(
         return std::nullopt;

     for (size_t i = index + 1; i < row_end; i++)
-        if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] < vec[index]))
+        if ((!if_map || if_map[i] != 0) && (!null_map || null_map[i] == 0) && (vec[i] > vec[index]))
             index = i;
     return {index};
 }

@@ -1940,8 +1940,7 @@ std::vector<String> QueryAnalyzer::collectIdentifierTypoHints(const Identifier &
     for (const auto & valid_identifier : valid_identifiers)
         prompting_strings.push_back(valid_identifier.getFullName());

-    NamePrompter<1> prompter;
-    return prompter.getHints(unresolved_identifier.getFullName(), prompting_strings);
+    return NamePrompter<1>::getHints(unresolved_identifier.getFullName(), prompting_strings);
 }

 /** Wrap expression node in tuple element function calls for nested paths.
@@ -346,7 +346,7 @@ ColumnPtr ColumnSparse::filter(const Filter & filt, ssize_t) const
     }

     auto res_values = values->filter(values_filter, values_result_size_hint);
-    return this->create(res_values, std::move(res_offsets), res_offset);
+    return create(res_values, std::move(res_offsets), res_offset);
 }

 void ColumnSparse::expand(const Filter & mask, bool inverted)
@@ -671,7 +671,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update)
             ReadableSize(rss),
             ReadableSize(difference));

-        total_memory_tracker.setRSS(rss, free_memory_in_allocator_arenas);
+        MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas);
     }
 }

@@ -203,7 +203,7 @@ public:
         if (total_connections_in_group >= limits.warning_limit && total_connections_in_group >= mute_warning_until)
         {
             LOG_WARNING(log, "Too many active sessions in group {}, count {}, warning limit {}", type, total_connections_in_group, limits.warning_limit);
-            mute_warning_until = roundUp(total_connections_in_group, limits.warning_step);
+            mute_warning_until = roundUp(total_connections_in_group, HTTPConnectionPools::Limits::warning_step);
         }
     }

@@ -207,7 +207,7 @@ public:
     void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
     {
         DB::PrefetchingHelper prefetching;
-        size_t prefetch_look_ahead = prefetching.getInitialLookAheadValue();
+        size_t prefetch_look_ahead = DB::PrefetchingHelper::getInitialLookAheadValue();

         size_t i = 0;
         auto prefetch_it = advanceIterator(this->begin(), prefetch_look_ahead);
@@ -216,10 +216,10 @@ public:
         {
             if constexpr (prefetch)
             {
-                if (i == prefetching.iterationsToMeasure())
+                if (i == DB::PrefetchingHelper::iterationsToMeasure())
                 {
                     prefetch_look_ahead = prefetching.calcPrefetchLookAhead();
-                    prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - prefetching.getInitialLookAheadValue());
+                    prefetch_it = advanceIterator(prefetch_it, prefetch_look_ahead - DB::PrefetchingHelper::getInitialLookAheadValue());
                 }

                 if (prefetch_it != end)
@@ -366,7 +366,7 @@ String demangleAndCollapseNames(std::optional<std::string_view> file, const char
     if (file.has_value())
     {
         std::string_view file_copy = file.value();
-        if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != file_copy.npos)
+        if (auto trim_pos = file_copy.find_last_of('/'); trim_pos != std::string_view::npos)
             file_copy.remove_suffix(file_copy.size() - trim_pos);
         if (file_copy.ends_with("functional"))
             return "?";
@@ -96,7 +96,7 @@ ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
     stack_t altstack_description{};
     altstack_description.ss_sp = alt_stack.getData();
     altstack_description.ss_flags = 0;
-    altstack_description.ss_size = alt_stack.getSize();
+    altstack_description.ss_size = ThreadStack::getSize();

     if (0 != sigaltstack(&altstack_description, nullptr))
     {
src/Common/randomDelay.cpp (new file, 39 lines)
@@ -0,0 +1,39 @@
+#include <Common/randomDelay.h>
+
+#include <Common/logger_useful.h>
+#include <Common/randomNumber.h>
+#include <base/sleep.h>
+
+
+void randomDelayForMaxMilliseconds(uint64_t milliseconds, LoggerPtr log, const char * start_of_message)
+{
+    if (milliseconds)
+    {
+        auto count = randomNumber() % milliseconds;
+
+        if (log)
+        {
+            if (start_of_message && !*start_of_message)
+                start_of_message = nullptr;
+
+            LOG_TEST(log, "{}{}Sleeping for {} milliseconds",
+                     (start_of_message ? start_of_message : ""),
+                     (start_of_message ? ": " : ""),
+                     count);
+        }
+
+        sleepForMilliseconds(count);
+
+        if (log)
+        {
+            LOG_TEST(log, "{}{}Awaking after sleeping",
+                     (start_of_message ? start_of_message : ""),
+                     (start_of_message ? ": " : ""));
+        }
+    }
+}
+
+void randomDelayForMaxSeconds(uint64_t seconds, LoggerPtr log, const char * start_of_message)
+{
+    randomDelayForMaxMilliseconds(seconds * 1000, log, start_of_message);
+}
src/Common/randomDelay.h (new file, 8 lines)
@@ -0,0 +1,8 @@
+#pragma once
+
+#include <Common/Logger.h>
+
+/// Sleeps for random duration between 0 and a specified number of milliseconds, optionally outputs a logging message about that.
+/// This function can be used to add random delays in tests.
+void randomDelayForMaxMilliseconds(uint64_t milliseconds, LoggerPtr log = nullptr, const char * start_of_message = nullptr);
+void randomDelayForMaxSeconds(uint64_t seconds, LoggerPtr log = nullptr, const char * start_of_message = nullptr);
@@ -38,7 +38,7 @@ void CachedCompressedReadBuffer::prefetch(Priority priority)
 bool CachedCompressedReadBuffer::nextImpl()
 {
     /// Let's check for the presence of a decompressed block in the cache, grab the ownership of this block, if it exists.
-    UInt128 key = cache->hash(path, file_pos);
+    UInt128 key = UncompressedCache::hash(path, file_pos);

     owned_cell = cache->getOrSet(key, [&]()
     {
@@ -592,7 +592,7 @@ String RecalculateCommand::run()

 String CleanResourcesCommand::run()
 {
-    keeper_dispatcher.cleanResources();
+    KeeperDispatcher::cleanResources();
     return "ok";
 }

@@ -194,12 +194,12 @@ ReturnType deserializeImpl(
     buf.dropCheckpoint();
     if (buf.hasUnreadData())
     {
+        restore_column_if_needed();
         if constexpr (throw_exception)
             throw Exception(
                 ErrorCodes::CANNOT_PARSE_BOOL,
                 "Cannot continue parsing after parsed bool value because it will result in the loss of some data. It may happen if "
                 "bool_true_representation or bool_false_representation contains some delimiters of input format");
-        restore_column_if_needed();
         return ReturnType(false);
     }
     return ReturnType(true);
@@ -463,6 +463,9 @@ DataTypePtr getLeastSupertype(const DataTypes & types)
             /// nested_type will be nullptr, we should return nullptr in this case.
             if (!nested_type)
                 return nullptr;
+            /// Common type for Nullable(Nothing) and Variant(...) is Variant(...)
+            if (isVariant(nested_type))
+                return nested_type;
             return std::make_shared<DataTypeNullable>(nested_type);
         }
     }
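The effect of the added branch can be sketched in SQL (a hypothetical query; per the new comment, the common type of `Nullable(Nothing)` and `Variant(...)` is the `Variant` itself):

```sql
-- Expected to return 'Variant(String, UInt64)' instead of trying to wrap Variant in Nullable.
SELECT toTypeName(if(number % 2 = 0, NULL, number::Variant(UInt64, String)))
FROM numbers(1);
```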
@@ -68,7 +68,7 @@ std::pair<String, StoragePtr> createTableFromAST(
     ast_create_query.setDatabase(database_name);

     if (ast_create_query.select && ast_create_query.isView())
-        ApplyWithSubqueryVisitor().visit(*ast_create_query.select);
+        ApplyWithSubqueryVisitor::visit(*ast_create_query.select);

     if (ast_create_query.as_table_function)
     {
@@ -171,7 +171,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
         auto ast = parseQueryFromMetadata(log, getContext(), full_path.string(), /*throw_on_error*/ true, /*remove_empty*/ false);
         if (ast)
         {
-            FunctionNameNormalizer().visit(ast.get());
+            FunctionNameNormalizer::visit(ast.get());
             auto * create_query = ast->as<ASTCreateQuery>();
             /// NOTE No concurrent writes are possible during database loading
             create_query->setDatabase(TSA_SUPPRESS_WARNING_FOR_READ(database_name));
@@ -448,7 +448,7 @@ std::vector<StorageID> TablesDependencyGraph::getTables() const
 void TablesDependencyGraph::mergeWith(const TablesDependencyGraph & other)
 {
     for (const auto & other_node : other.nodes)
-        addDependencies(other_node->storage_id, other.getDependencies(*other_node));
+        addDependencies(other_node->storage_id, TablesDependencyGraph::getDependencies(*other_node));
 }

@@ -413,7 +413,7 @@ void FlatDictionary::blockToAttributes(const Block & block)
     const auto keys_column = block.safeGetByPosition(0).column;

     DictionaryKeysArenaHolder<DictionaryKeyType::Simple> arena_holder;
-    DictionaryKeysExtractor<DictionaryKeyType::Simple> keys_extractor({ keys_column }, arena_holder.getComplexKeyArena());
+    DictionaryKeysExtractor<DictionaryKeyType::Simple> keys_extractor({ keys_column }, arena_holder.getComplexKeyArena()); /// NOLINT(readability-static-accessed-through-instance)
     size_t keys_size = keys_extractor.getKeysSize();

     static constexpr size_t key_offset = 1;
@@ -214,7 +214,7 @@ public:
     static constexpr Coord kEps = 1e-4f;

 private:
-    std::unique_ptr<ICell<ReturnCell>> root = nullptr;
+    std::unique_ptr<ICell<ReturnCell>> root;
     Coord min_x = 0, min_y = 0;
     Coord max_x = 0, max_y = 0;
     const size_t k_min_intersections;
@@ -581,7 +581,7 @@ try
     auto disk_ptr = std::static_pointer_cast<DiskLocal>(shared_from_this());
     auto tmp_file = std::make_unique<TemporaryFileOnDisk>(disk_ptr);
     auto buf = std::make_unique<WriteBufferFromTemporaryFile>(std::move(tmp_file));
-    buf->write(data.data, data.PAGE_SIZE_IN_BYTES);
+    buf->write(data.data, DiskWriteCheckData::PAGE_SIZE_IN_BYTES);
     buf->finalize();
     buf->sync();
 }
@@ -100,7 +100,11 @@ protected:
         validateDataType(type, data_type_validation_settings);

         if (cast_type == CastType::accurateOrNull)
-            return makeNullable(type);
+        {
+            /// Variant handles NULLs by itself during conversions.
+            if (!isVariant(type))
+                return makeNullable(type);
+        }

         if (internal)
             return type;
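A hedged behavioral sketch: with `accurateCastOrNull`, ordinary target types are wrapped in `Nullable`, while per this change `Variant` targets are left as-is because Variant stores NULLs itself:

```sql
SELECT accurateCastOrNull('abc', 'UInt64') AS failed_parse, -- NULL, type Nullable(UInt64)
       accurateCastOrNull('42', 'UInt64')  AS ok_parse;     -- 42
```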
@@ -90,7 +90,6 @@ namespace ErrorCodes
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
     extern const int NOT_IMPLEMENTED;
     extern const int CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN;
-    extern const int CANNOT_PARSE_BOOL;
     extern const int VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE;
 }

@@ -1816,6 +1815,7 @@ struct ConvertImpl


 /// Generic conversion of any type from String. Used for complex types: Array and Tuple or types with custom serialization.
+template <bool throw_on_error>
 struct ConvertImplGenericFromString
 {
     static ColumnPtr execute(ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * column_nullable, size_t input_rows_count)
|
||||
{
|
||||
serialization_from.deserializeWholeText(column_to, read_buffer, format_settings);
|
||||
}
|
||||
catch (const Exception & e)
|
||||
catch (const Exception &)
|
||||
{
|
||||
auto * nullable_column = typeid_cast<ColumnNullable *>(&column_to);
|
||||
if (e.code() == ErrorCodes::CANNOT_PARSE_BOOL && nullable_column)
|
||||
{
|
||||
auto & col_nullmap = nullable_column->getNullMapData();
|
||||
if (col_nullmap.size() != nullable_column->size())
|
||||
col_nullmap.resize_fill(nullable_column->size());
|
||||
if (nullable_column->size() == (i + 1))
|
||||
nullable_column->popBack(1);
|
||||
nullable_column->insertDefault();
|
||||
continue;
|
||||
}
|
||||
throw;
|
||||
if constexpr (throw_on_error)
|
||||
throw;
|
||||
/// Check if exception happened after we inserted the value
|
||||
/// (deserializeWholeText should not do it, but let's check anyway).
|
||||
if (column_to.size() > i)
|
||||
column_to.popBack(column_to.size() - i);
|
||||
column_to.insertDefault();
|
||||
}
|
||||
|
||||
/// Usually deserializeWholeText checks for eof after parsing, but let's check one more time just in case.
|
||||
if (!read_buffer.eof())
|
||||
{
|
||||
if (result_type)
|
||||
throwExceptionForIncompletelyParsedValue(read_buffer, *result_type);
|
||||
if constexpr (throw_on_error)
|
||||
{
|
||||
if (result_type)
|
||||
throwExceptionForIncompletelyParsedValue(read_buffer, *result_type);
|
||||
else
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse string to column {}. Expected eof", column_to.getName());
|
||||
}
|
||||
else
|
||||
throw Exception(ErrorCodes::CANNOT_PARSE_TEXT,
|
||||
"Cannot parse string to column {}. Expected eof", column_to.getName());
|
||||
{
|
||||
if (column_to.size() > i)
|
||||
column_to.popBack(column_to.size() - i);
|
||||
column_to.insertDefault();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3280,7 +3285,9 @@ private:
         {
             if (checkAndGetDataType<DataTypeString>(from_type.get()))
             {
-                return &ConvertImplGenericFromString::execute;
+                if (cast_type == CastType::accurateOrNull)
+                    return &ConvertImplGenericFromString<false>::execute;
+                return &ConvertImplGenericFromString<true>::execute;
             }

             return createWrapper<ToDataType>(from_type, to_type, requested_result_is_nullable);
@@ -3443,7 +3450,7 @@ private:
         /// Conversion from String through parsing.
         if (checkAndGetDataType<DataTypeString>(from_type_untyped.get()))
         {
-            return &ConvertImplGenericFromString::execute;
+            return &ConvertImplGenericFromString<true>::execute;
         }
         else if (const auto * agg_type = checkAndGetDataType<DataTypeAggregateFunction>(from_type_untyped.get()))
         {
@@ -3486,7 +3493,7 @@ private:
         /// Conversion from String through parsing.
         if (checkAndGetDataType<DataTypeString>(from_type_untyped.get()))
         {
-            return &ConvertImplGenericFromString::execute;
+            return &ConvertImplGenericFromString<true>::execute;
         }

         DataTypePtr from_type_holder;
@@ -3577,7 +3584,7 @@ private:
         /// Conversion from String through parsing.
         if (checkAndGetDataType<DataTypeString>(from_type_untyped.get()))
         {
-            return &ConvertImplGenericFromString::execute;
+            return &ConvertImplGenericFromString<true>::execute;
         }

         const auto * from_type = checkAndGetDataType<DataTypeTuple>(from_type_untyped.get());
@@ -3922,7 +3929,7 @@ private:
     {
         return [] (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count)
         {
-            auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count)->assumeMutable();
+            auto res = ConvertImplGenericFromString<true>::execute(arguments, result_type, nullable_source, input_rows_count)->assumeMutable();
             res->finalize();
             return res;
         };
@@ -4077,6 +4084,29 @@ private:
         return ColumnVariant::create(discriminators, variants);
     }

+    WrapperType createStringToVariantWrapper() const
+    {
+        return [&](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr
+        {
+            auto column = arguments[0].column->convertToFullColumnIfLowCardinality();
+            auto args = arguments;
+            args[0].column = column;
+
+            const ColumnNullable * column_nullable = nullptr;
+            if (isColumnNullable(*args[0].column))
+            {
+                column_nullable = assert_cast<const ColumnNullable *>(args[0].column.get());
+                args[0].column = column_nullable->getNestedColumnPtr();
+            }
+
+            args[0].type = removeNullable(removeLowCardinality(args[0].type));
+
+            if (cast_type == CastType::accurateOrNull)
+                return ConvertImplGenericFromString<false>::execute(args, result_type, column_nullable, input_rows_count);
+            return ConvertImplGenericFromString<true>::execute(args, result_type, column_nullable, input_rows_count);
+        };
+    }
+
     WrapperType createColumnToVariantWrapper(const DataTypePtr & from_type, const DataTypeVariant & to_variant) const
     {
         /// We allow converting NULL to Variant(...) as Variant can store NULLs.
@@ -4091,6 +4121,10 @@ private:
         }

         auto variant_discr_opt = to_variant.tryGetVariantDiscriminator(*removeNullableOrLowCardinalityNullable(from_type));
+        /// Cast String to Variant through parsing if it's not Variant(String).
+        if (isStringOrFixedString(removeNullable(removeLowCardinality(from_type))) && (!variant_discr_opt || to_variant.getVariants().size() > 1))
+            return createStringToVariantWrapper();
+
         if (!variant_discr_opt)
             throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Cannot convert type {} to {}. Conversion to Variant allowed only for types from this Variant", from_type->getName(), to_variant.getName());

@@ -4692,7 +4726,7 @@ private:

         if (to_type->getCustomSerialization() && to_type->getCustomName())
         {
-            ret = [requested_result_is_nullable](
+            ret = [this, requested_result_is_nullable](
                 ColumnsWithTypeAndName & arguments,
                 const DataTypePtr & result_type,
                 const ColumnNullable * column_nullable,
@@ -4701,7 +4735,10 @@ private:
                 auto wrapped_result_type = result_type;
                 if (requested_result_is_nullable)
                     wrapped_result_type = makeNullable(result_type);
-                return ConvertImplGenericFromString::execute(
-                    arguments, wrapped_result_type, column_nullable, input_rows_count);
+                if (this->cast_type == CastType::accurateOrNull)
+                    return ConvertImplGenericFromString<false>::execute(
+                        arguments, wrapped_result_type, column_nullable, input_rows_count);
+                return ConvertImplGenericFromString<true>::execute(
+                    arguments, wrapped_result_type, column_nullable, input_rows_count);
             };
             return true;
@@ -1139,7 +1139,7 @@ private:
             getName());

         auto dictionary = helper.getDictionary(arguments[0].column);
-        const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
+        const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

         return std::make_shared<DataTypeArray>(removeNullable(hierarchical_attribute.type));
     }
@@ -1150,7 +1150,7 @@ private:
             return result_type->createColumn();

         auto dictionary = helper.getDictionary(arguments[0].column);
-        const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
+        const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

         auto key_column = ColumnWithTypeAndName{arguments[1].column, arguments[1].type, arguments[1].name};
         auto key_column_casted = castColumnAccurate(key_column, removeNullable(hierarchical_attribute.type));
@@ -1205,7 +1205,7 @@ private:
             return result_type->createColumn();

         auto dictionary = helper.getDictionary(arguments[0].column);
-        const auto & hierarchical_attribute = helper.getDictionaryHierarchicalAttribute(dictionary);
+        const auto & hierarchical_attribute = FunctionDictHelper::getDictionaryHierarchicalAttribute(dictionary);

         auto key_column = ColumnWithTypeAndName{arguments[1].column->convertToFullColumnIfConst(), arguments[1].type, arguments[2].name};
         auto in_key_column = ColumnWithTypeAndName{arguments[2].column->convertToFullColumnIfConst(), arguments[2].type, arguments[2].name};

@ -144,7 +144,7 @@ struct NumericArraySource : public ArraySourceImpl<NumericArraySource<T>>
|
||||
#pragma clang diagnostic ignored "-Wsuggest-override"
|
||||
#pragma clang diagnostic ignored "-Wsuggest-destructor-override"
|
||||
|
||||
/// NOLINTBEGIN(hicpp-use-override)
|
||||
/// NOLINTBEGIN(hicpp-use-override, modernize-use-override)
|
||||
|
||||
template <typename Base>
|
||||
struct ConstSource : public Base
|
||||
@ -233,7 +233,7 @@ struct ConstSource : public Base
|
||||
}
|
||||
};
|
||||
|
||||
/// NOLINTEND(hicpp-use-override)
|
||||
/// NOLINTEND(hicpp-use-override, modernize-use-override)
|
||||
|
||||
#pragma clang diagnostic pop
|
||||
|
||||
|
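Aside on the NOLINT change above: clang-tidy treats NOLINTBEGIN/NOLINTEND as a matched pair, so when a check name is added to the opening marker it must be repeated verbatim in the closing one, which is why both ends of the block change in this hunk. A minimal sketch:

    struct Base { virtual void f() {} virtual ~Base() = default; };

    /// NOLINTBEGIN(hicpp-use-override, modernize-use-override)
    struct Derived : Base
    {
        void f() {} // would normally trigger the *-use-override checks
    };
    /// NOLINTEND(hicpp-use-override, modernize-use-override)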
@ -86,7 +86,7 @@ namespace
        auto & res = typeid_cast<ASTCreateFunctionQuery &>(*ptr);
        res.if_not_exists = false;
        res.or_replace = false;
-       FunctionNameNormalizer().visit(res.function_core.get());
+       FunctionNameNormalizer::visit(res.function_core.get());
        return ptr;
    }
}

@ -23,7 +23,7 @@ ASTPtr normalizeCreateFunctionQuery(const IAST & create_function_query)
    auto & res = typeid_cast<ASTCreateFunctionQuery &>(*ptr);
    res.if_not_exists = false;
    res.or_replace = false;
-   FunctionNameNormalizer().visit(res.function_core.get());
+   FunctionNameNormalizer::visit(res.function_core.get());
    return ptr;
}
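Many hunks in this commit share one pattern: a static member function that was being invoked through a temporary or an instance is now called through the class name. A minimal before/after sketch with illustrative names, not the ClickHouse classes:

    struct Normalizer
    {
        static void visit(int * node) { if (node) ++*node; }
    };

    void before(int * ast) { Normalizer().visit(ast); } // constructs a throwaway temporary
    void after(int * ast)  { Normalizer::visit(ast); }  // qualified call, no instance needed

Both forms are functionally identical; the qualified call avoids the pointless temporary and makes it obvious at the call site that no instance state is involved. This is the kind of code flagged by checks such as clang-tidy's readability-static-accessed-through-instance.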
@ -70,8 +70,7 @@ namespace
        const char * src_pos = src;
        const char * src_end = src + src_size;
        char * dst_pos = dst;
        // perfect hashmap to lookup html character references
-       HTMLCharacterHash hash;

        // to hold char seq for lookup, reuse it
        std::vector<char> seq;
        while (true)
@ -108,7 +107,7 @@ namespace
            // null terminate the sequence
            seq.push_back('\0');
            // lookup the html sequence in the perfect hashmap.
-           const auto * res = hash.Lookup(seq.data(), strlen(seq.data()));
+           const auto * res = HTMLCharacterHash::Lookup(seq.data(), strlen(seq.data()));
            // reset so that it's reused in the next iteration
            seq.clear();
            if (res)

@ -83,7 +83,7 @@ public:

    static ColumnWithTypeAndName createScalar(ContextPtr context_)
    {
-       if (const auto * block = context_->tryGetSpecialScalar(Scalar::scalar_name))
+       if (auto block = context_->tryGetSpecialScalar(Scalar::scalar_name))
            return block->getByPosition(0);
        else if (context_->hasQueryContext())
        {

@ -321,6 +321,9 @@ public:

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
+       if (input_rows_count == 0)
+           return ColumnUInt64::create();
+
        return selector.selectAndExecute(arguments, result_type, input_rows_count);
    }

@ -68,7 +68,7 @@ private:
    void startWritingFile();
    void endWritingFile();

-   std::unique_ptr<StreamInfo> stream_info TSA_GUARDED_BY(mutex) = nullptr;
+   std::unique_ptr<StreamInfo> stream_info TSA_GUARDED_BY(mutex);
    bool is_writing_file TSA_GUARDED_BY(mutex) = false;
    bool finalized TSA_GUARDED_BY(mutex) = false;
    mutable std::mutex mutex;
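The `= nullptr` dropped from stream_info above (and from directory_watch later in this diff) is redundant rather than behavioural: a default-constructed std::unique_ptr is already null. A tiny sketch:

    #include <cassert>
    #include <memory>

    struct StreamInfo {};

    struct Writer
    {
        // "= nullptr" would be redundant here: the default constructor
        // of unique_ptr already produces a null pointer.
        std::unique_ptr<StreamInfo> stream_info;
    };

    int main()
    {
        Writer w;
        assert(!w.stream_info); // holds without any explicit initializer
    }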
@ -26,7 +26,7 @@ void MMapReadBufferFromFileWithCache::init()

MMapReadBufferFromFileWithCache::MMapReadBufferFromFileWithCache(
    MMappedFileCache & cache, const std::string & file_name, size_t offset, size_t length)
{
-   mapped = cache.getOrSet(cache.hash(file_name, offset, length), [&]
+   mapped = cache.getOrSet(MMappedFileCache::hash(file_name, offset, length), [&]
    {
        return std::make_shared<MMappedFile>(file_name, offset, length);
    });

@ -822,7 +822,7 @@ inline ReturnType readDateTextImpl(ExtendedDayNum & date, ReadBuffer & buf, cons
        return false;

    /// When the parameter is out of rule or out of range, Date32 uses 1925-01-01 as the default value (-DateLUT::instance().getDayNumOffsetEpoch(), -16436) and Date uses 1970-01-01.
-   date = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day(), -static_cast<Int32>(date_lut.getDayNumOffsetEpoch()));
+   date = date_lut.makeDayNum(local_date.year(), local_date.month(), local_date.day(), -static_cast<Int32>(DateLUTImpl::getDayNumOffsetEpoch()));
    return ReturnType(true);
}

@ -1880,10 +1880,10 @@ struct PcgDeserializer
        assertChar(' ', buf);
        readText(state, buf);

-       if (multiplier != rng.multiplier())
-           throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect multiplier in pcg32: expected {}, got {}", rng.multiplier(), multiplier);
-       if (increment != rng.increment())
-           throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect increment in pcg32: expected {}, got {}", rng.increment(), increment);
+       if (multiplier != pcg32_fast::multiplier())
+           throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect multiplier in pcg32: expected {}, got {}", pcg32_fast::multiplier(), multiplier);
+       if (increment != pcg32_fast::increment())
+           throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect increment in pcg32: expected {}, got {}", pcg32_fast::increment(), increment);

        rng.state_ = state;
    }

@ -67,7 +67,7 @@ URI::URI(const std::string & uri_)
    else
    {
        mapper["s3"] = "https://{bucket}.s3.amazonaws.com";
-       mapper["gs"] = "https://{bucket}.storage.googleapis.com";
+       mapper["gs"] = "https://storage.googleapis.com/{bucket}";
        mapper["oss"] = "https://{bucket}.oss.aliyuncs.com";
    }
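The gs mapping above switches from the virtual-hosted endpoint style ({bucket} as a subdomain) to the path style ({bucket} in the path); GCS accepts both forms, and the {bucket} placeholder is substituted the same way in either. A sketch of that substitution, where expandBucket is an illustrative helper rather than the actual URI parser:

    #include <iostream>
    #include <string>

    // Replace the "{bucket}" placeholder in an endpoint template.
    std::string expandBucket(std::string templ, const std::string & bucket)
    {
        if (auto pos = templ.find("{bucket}"); pos != std::string::npos)
            templ.replace(pos, 8, bucket); // 8 == strlen("{bucket}")
        return templ;
    }

    int main()
    {
        // Virtual-hosted style (old) vs path style (new):
        std::cout << expandBucket("https://{bucket}.storage.googleapis.com", "my-bucket") << '\n';
        std::cout << expandBucket("https://storage.googleapis.com/{bucket}", "my-bucket") << '\n';
    }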
@ -1390,9 +1390,9 @@ struct PcgSerializer
{
    static void serializePcg32(const pcg32_fast & rng, WriteBuffer & buf)
    {
-       writeText(rng.multiplier(), buf);
+       writeText(pcg32_fast::multiplier(), buf);
        writeChar(' ', buf);
-       writeText(rng.increment(), buf);
+       writeText(pcg32_fast::increment(), buf);
        writeChar(' ', buf);
        writeText(rng.state_, buf);
    }

@ -294,7 +294,7 @@ void AsynchronousInsertQueue::preprocessInsertQuery(const ASTPtr & query, const

    InterpreterInsertQuery interpreter(query, query_context, query_context->getSettingsRef().insert_allow_materialized_columns);
    auto table = interpreter.getTable(insert_query);
-   auto sample_block = interpreter.getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);
+   auto sample_block = InterpreterInsertQuery::getSampleBlock(insert_query, table, table->getInMemoryMetadataPtr(), query_context);

    if (!FormatFactory::instance().isInputFormat(insert_query.format))
        throw Exception(ErrorCodes::UNKNOWN_FORMAT, "Unknown input format {}", insert_query.format);

@ -90,9 +90,15 @@ FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & s
    , metadata(settings.base_path, settings.background_download_queue_size_limit, settings.background_download_threads, write_cache_per_user_directory)
{
    if (settings.cache_policy == "LRU")
-       main_priority = std::make_unique<LRUFileCachePriority>(settings.max_size, settings.max_elements);
+   {
+       main_priority = std::make_unique<LRUFileCachePriority>(
+           settings.max_size, settings.max_elements, nullptr, cache_name);
+   }
    else if (settings.cache_policy == "SLRU")
-       main_priority = std::make_unique<SLRUFileCachePriority>(settings.max_size, settings.max_elements, settings.slru_size_ratio);
+   {
+       main_priority = std::make_unique<SLRUFileCachePriority>(
+           settings.max_size, settings.max_elements, settings.slru_size_ratio, nullptr, nullptr, cache_name);
+   }
    else
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown cache policy: {}", settings.cache_policy);

@ -192,8 +192,8 @@ protected:

    virtual void releaseImpl(size_t /* size */, size_t /* elements */) {}

-   size_t max_size = 0;
-   size_t max_elements = 0;
+   std::atomic<size_t> max_size = 0;
+   std::atomic<size_t> max_elements = 0;
};

}
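Making the limits std::atomic lets them be read without holding the cache lock, which the approximate state logging added in the next hunks relies on. A sketch of the idea, assuming relaxed ordering is acceptable for monitoring-style reads:

    #include <atomic>
    #include <cstddef>

    // An atomic limit can be sampled by logging/metrics threads without
    // taking the cache mutex, and updated in place for dynamic resizing.
    struct Limits
    {
        std::atomic<size_t> max_size{0};

        size_t readApprox() const { return max_size.load(std::memory_order_relaxed); }
        void resize(size_t new_size) { max_size.store(new_size, std::memory_order_relaxed); }
    };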
@ -126,6 +126,9 @@ void LRUFileCachePriority::updateSize(int64_t size)
    chassert(size != 0);
    chassert(size > 0 || state->current_size >= size_t(-size));

+   LOG_TEST(log, "Updating size with {}, current is {}",
+       size, state->current_size);
+
    state->current_size += size;
    CurrentMetrics::add(CurrentMetrics::FilesystemCacheSize, size);
}
@ -444,12 +447,15 @@ void LRUFileCachePriority::LRUIterator::invalidate()
    assertValid();

    const auto & entry = *iterator;
-   LOG_TEST(cache_priority->log,
-       "Invalidating entry in LRU queue entry {}", entry->toString());

    chassert(entry->size != 0);
    cache_priority->updateSize(-entry->size);
    cache_priority->updateElementsCount(-1);

+   LOG_TEST(cache_priority->log,
+       "Invalidated entry in LRU queue {}: {}",
+       entry->toString(), cache_priority->getApproxStateInfoForLog());
+
    entry->size = 0;
}

@ -526,6 +532,12 @@ std::string LRUFileCachePriority::getStateInfoForLog(const CachePriorityGuard::L
        getSize(lock), max_size, getElementsCount(lock), max_elements, description);
}

+std::string LRUFileCachePriority::getApproxStateInfoForLog() const
+{
+   return fmt::format("size: {}/{}, elements: {}/{} (description: {})",
+       getSizeApprox(), max_size, getElementsCountApprox(), max_elements, description);
+}
+
void LRUFileCachePriority::holdImpl(
    size_t size,
    size_t elements,

@ -116,6 +116,7 @@ private:
        const CachePriorityGuard::Lock & lock) override;

    void releaseImpl(size_t size, size_t elements) override;
+   std::string getApproxStateInfoForLog() const;
};

class LRUFileCachePriority::LRUIterator : public IFileCachePriority::Iterator

@ -28,17 +28,19 @@ SLRUFileCachePriority::SLRUFileCachePriority(
    size_t max_elements_,
    double size_ratio_,
    LRUFileCachePriority::StatePtr probationary_state_,
-   LRUFileCachePriority::StatePtr protected_state_)
+   LRUFileCachePriority::StatePtr protected_state_,
+   const std::string & description_)
    : IFileCachePriority(max_size_, max_elements_)
    , size_ratio(size_ratio_)
    , protected_queue(LRUFileCachePriority(getRatio(max_size_, size_ratio),
        getRatio(max_elements_, size_ratio),
        protected_state_,
-       "protected"))
+       description_ + ", protected"))
    , probationary_queue(LRUFileCachePriority(getRatio(max_size_, 1 - size_ratio),
        getRatio(max_elements_, 1 - size_ratio),
        probationary_state_,
-       "probationary"))
+       description_ + ", probationary"))
+   , log(getLogger("SLRUFileCachePriority(" + description_ + ")"))
{
    LOG_DEBUG(
        log, "Probationary queue {} in size and {} in elements. "

@ -19,7 +19,8 @@ public:
    size_t max_elements_,
    double size_ratio_,
    LRUFileCachePriority::StatePtr probationary_state_ = nullptr,
-   LRUFileCachePriority::StatePtr protected_state_ = nullptr);
+   LRUFileCachePriority::StatePtr protected_state_ = nullptr,
+   const std::string & description_ = "none");

    size_t getSize(const CachePriorityGuard::Lock & lock) const override;

@ -67,7 +68,7 @@ private:
    double size_ratio;
    LRUFileCachePriority protected_queue;
    LRUFileCachePriority probationary_queue;
-   LoggerPtr log = getLogger("SLRUFileCachePriority");
+   LoggerPtr log;

    void increasePriority(SLRUIterator & iterator, const CachePriorityGuard::Lock & lock);
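In the SLRU constructor above, size_ratio carves the total capacity into a protected and a probationary queue. A sketch of the arithmetic, with getRatio assumed here to be a simple rounded product (the real helper may differ):

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    // Assumed helper: the share of a total budget given to one SLRU queue.
    static size_t getRatio(size_t total, double ratio)
    {
        return static_cast<size_t>(std::lround(total * ratio));
    }

    int main()
    {
        const size_t max_size = 1000;
        const double size_ratio = 0.3; // fraction reserved for the protected queue

        std::cout << "protected:    " << getRatio(max_size, size_ratio) << '\n';     // 300
        std::cout << "probationary: " << getRatio(max_size, 1 - size_ratio) << '\n'; // 700
    }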
@ -971,7 +971,7 @@ Strings Context::getWarnings() const

/// TODO: remove, use `getTempDataOnDisk`
VolumePtr Context::getGlobalTemporaryVolume() const
{
-   std::lock_guard lock(shared->mutex);
+   SharedLockGuard lock(shared->mutex);
    /// Calling this method we just bypass the `temp_data_on_disk` and write to the file on the volume directly.
    /// Volume is the same for `root_temp_data_on_disk` (always set) and `temp_data_on_disk` (if it's set).
    if (shared->root_temp_data_on_disk)
@ -1550,14 +1550,17 @@ ClassifierPtr Context::getWorkloadClassifier() const
}

-const Scalars & Context::getScalars() const
+Scalars Context::getScalars() const
{
+   std::lock_guard lock(mutex);
    return scalars;
}

-const Block & Context::getScalar(const String & name) const
+Block Context::getScalar(const String & name) const
{
+   std::lock_guard lock(mutex);

    auto it = scalars.find(name);
    if (scalars.end() == it)
    {
@ -1568,12 +1571,13 @@ const Block & Context::getScalar(const String & name) const
    return it->second;
}

-const Block * Context::tryGetSpecialScalar(const String & name) const
+std::optional<Block> Context::tryGetSpecialScalar(const String & name) const
{
+   std::lock_guard lock(mutex);
    auto it = special_scalars.find(name);
    if (special_scalars.end() == it)
-       return nullptr;
-   return &it->second;
+       return std::nullopt;
+   return it->second;
}

Tables Context::getExternalTables() const
@ -1653,6 +1657,7 @@ void Context::addScalar(const String & name, const Block & block)
    if (isGlobalContext())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars");

+   std::lock_guard lock(mutex);
    scalars[name] = block;
}

@ -1662,6 +1667,7 @@ void Context::addSpecialScalar(const String & name, const Block & block)
    if (isGlobalContext())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have local scalars");

+   std::lock_guard lock(mutex);
    special_scalars[name] = block;
}

@ -1671,6 +1677,7 @@ bool Context::hasScalar(const String & name) const
    if (isGlobalContext())
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have scalars");

+   std::lock_guard lock(mutex);
    return scalars.contains(name);
}

@ -680,12 +680,12 @@ public:
    std::shared_ptr<TemporaryTableHolder> findExternalTable(const String & table_name) const;
    std::shared_ptr<TemporaryTableHolder> removeExternalTable(const String & table_name);

-   const Scalars & getScalars() const;
-   const Block & getScalar(const String & name) const;
+   Scalars getScalars() const;
+   Block getScalar(const String & name) const;
    void addScalar(const String & name, const Block & block);
    bool hasScalar(const String & name) const;

-   const Block * tryGetSpecialScalar(const String & name) const;
+   std::optional<Block> tryGetSpecialScalar(const String & name) const;
    void addSpecialScalar(const String & name, const Block & block);

    const QueryAccessInfo & getQueryAccessInfo() const { return *getQueryAccessInfoPtr(); }
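The Context scalar getters above illustrate the classic fix for returning references to mutex-protected state: a returned reference (or pointer) outlives the critical section, so the caller can read the member after the lock is gone, while returning by value copies under the lock. A minimal sketch with an illustrative Registry class:

    #include <map>
    #include <mutex>
    #include <string>

    class Registry
    {
    public:
        // Unsafe variant: the reference escapes the critical section.
        // const std::string & get(const std::string & key) const;

        // Safe variant, as in the diff above: copy while the mutex is held.
        std::string get(const std::string & key) const
        {
            std::lock_guard lock(mutex);
            return values.at(key); // the return value is constructed before the lock is released
        }

    private:
        mutable std::mutex mutex;
        std::map<std::string, std::string> values;
    };

The same reasoning turns tryGetSpecialScalar's `const Block *` into `std::optional<Block>`: an owned copy instead of a pointer into the guarded map.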
@ -112,7 +112,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr
    auto hash = subquery.getTreeHash(/*ignore_aliases=*/ true);
    const auto scalar_query_hash_str = toString(hash);

-   std::unique_ptr<InterpreterSelectWithUnionQuery> interpreter = nullptr;
+   std::unique_ptr<InterpreterSelectWithUnionQuery> interpreter;
    bool hit = false;
    bool is_local = false;

@ -21,7 +21,7 @@ namespace ErrorCodes

BlockIO InterpreterCreateFunctionQuery::execute()
{
-   FunctionNameNormalizer().visit(query_ptr.get());
+   FunctionNameNormalizer::visit(query_ptr.get());
    const auto updated_query_ptr = removeOnClusterClauseIfNeeded(query_ptr, getContext());
    ASTCreateFunctionQuery & create_function_query = updated_query_ptr->as<ASTCreateFunctionQuery &>();

@ -39,12 +39,12 @@ BlockIO InterpreterCreateIndexQuery::execute()
    }
    // Noop if allow_create_index_without_type = true. throw otherwise
-   if (!create_index.index_decl->as<ASTIndexDeclaration>()->type)
+   if (!create_index.index_decl->as<ASTIndexDeclaration>()->getType())
    {
        if (!current_context->getSettingsRef().allow_create_index_without_type)
        {
            throw Exception(ErrorCodes::INCORRECT_QUERY, "CREATE INDEX without TYPE is forbidden."
-               " SET allow_create_index_without_type=1 to ignore this statements.");
+               " SET allow_create_index_without_type=1 to ignore this statements");
        }
        else
        {

@ -35,7 +35,7 @@ InterpreterDeleteQuery::InterpreterDeleteQuery(const ASTPtr & query_ptr_, Contex

BlockIO InterpreterDeleteQuery::execute()
{
-   FunctionNameNormalizer().visit(query_ptr.get());
+   FunctionNameNormalizer::visit(query_ptr.get());
    const ASTDeleteQuery & delete_query = query_ptr->as<ASTDeleteQuery &>();
    auto table_id = getContext()->resolveStorageID(delete_query, Context::ResolveOrdinary);

@ -21,7 +21,7 @@ namespace ErrorCodes

BlockIO InterpreterDropFunctionQuery::execute()
{
-   FunctionNameNormalizer().visit(query_ptr.get());
+   FunctionNameNormalizer::visit(query_ptr.get());

    const auto updated_query_ptr = removeOnClusterClauseIfNeeded(query_ptr, getContext());
    ASTDropFunctionQuery & drop_function_query = updated_query_ptr->as<ASTDropFunctionQuery &>();

@ -412,8 +412,8 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    if (!options.is_subquery)
    {
        if (context->getSettingsRef().enable_global_with_statement)
-           ApplyWithAliasVisitor().visit(query_ptr);
-       ApplyWithSubqueryVisitor().visit(query_ptr);
+           ApplyWithAliasVisitor::visit(query_ptr);
+       ApplyWithSubqueryVisitor::visit(query_ptr);
    }

    query_info.query = query_ptr->clone();
@ -609,7 +609,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    if (view)
    {
        query_info.is_parameterized_view = view->isParameterizedView();
-       view->replaceWithSubquery(getSelectQuery(), view_table, metadata_snapshot, view->isParameterizedView());
+       StorageView::replaceWithSubquery(getSelectQuery(), view_table, metadata_snapshot, view->isParameterizedView());
    }

    syntax_analyzer_result = TreeRewriter(context).analyzeSelect(
@ -629,7 +629,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    if (view)
    {
        /// Restore original view name. Save rewritten subquery for future usage in StorageView.
-       query_info.view_query = view->restoreViewName(getSelectQuery(), view_table);
+       query_info.view_query = StorageView::restoreViewName(getSelectQuery(), view_table);
        view = nullptr;
    }

@ -2434,7 +2434,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc
    agg_count.create(place);
    SCOPE_EXIT_MEMORY_SAFE(agg_count.destroy(place));

-   agg_count.set(place, *num_rows);
+   AggregateFunctionCount::set(place, *num_rows);

    auto column = ColumnAggregateFunction::create(func);
    column->insertFrom(place);

@ -498,14 +498,12 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
    columns->columns->children.emplace_back(create_materialized_column_declaration(version_column_name, "UInt64", UInt64(1)));

    /// Add minmax skipping index for _version column.
-   auto version_index = std::make_shared<ASTIndexDeclaration>();
-   version_index->name = version_column_name;
    auto index_expr = std::make_shared<ASTIdentifier>(version_column_name);
    auto index_type = makeASTFunction("minmax");
    index_type->no_empty_args = true;
-   version_index->set(version_index->expr, index_expr);
-   version_index->set(version_index->type, index_type);
+   auto version_index = std::make_shared<ASTIndexDeclaration>(index_expr, index_type, version_column_name);
    version_index->granularity = 1;

    ASTPtr indices = std::make_shared<ASTExpressionList>();
    indices->children.push_back(version_index);
    columns->set(columns->indices, indices);

@ -1587,7 +1587,7 @@ void TreeRewriter::normalize(
    /// already normalized on initiator node, or not normalized and should remain unnormalized for
    /// compatibility.
    if (context_->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && settings.normalize_function_names)
-       FunctionNameNormalizer().visit(query.get());
+       FunctionNameNormalizer::visit(query.get());

    if (settings.optimize_move_to_prewhere)
    {

@ -73,7 +73,7 @@ std::optional<EvaluateConstantExpressionResult> evaluateConstantExpressionImpl(c
    /// already normalized on initiator node, or not normalized and should remain unnormalized for
    /// compatibility.
    if (context->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && context->getSettingsRef().normalize_function_names)
-       FunctionNameNormalizer().visit(ast.get());
+       FunctionNameNormalizer::visit(ast.get());

    auto syntax_result = TreeRewriter(context, no_throw).analyze(ast, source_columns);
    if (!syntax_result)

@ -8,24 +8,63 @@
namespace DB
{

-ASTPtr ASTIndexDeclaration::clone() const
-{
-   auto res = std::make_shared<ASTIndexDeclaration>();
-
-   res->name = name;
-   if (granularity)
-       res->granularity = granularity;
-   if (expr)
-       res->set(res->expr, expr->clone());
-   if (type)
-       res->set(res->type, type->clone());
-   return res;
+namespace ErrorCodes
+{
+   extern const int LOGICAL_ERROR;
}

+ASTIndexDeclaration::ASTIndexDeclaration(ASTPtr expression, ASTPtr type, const String & name_)
+   : name(name_)
+{
+   if (!expression)
+       throw Exception(ErrorCodes::LOGICAL_ERROR, "Index declaration must have an expression");
+   children.push_back(expression);
+
+   if (type)
+   {
+       if (!dynamic_cast<const ASTFunction *>(type.get()))
+           throw Exception(ErrorCodes::LOGICAL_ERROR, "Index declaration type must be a function");
+       children.push_back(type);
+   }
+}
+
+ASTPtr ASTIndexDeclaration::clone() const
+{
+   ASTPtr expr = getExpression();
+   if (expr)
+       expr = expr->clone();
+
+   ASTPtr type = getType();
+   if (type)
+       type = type->clone();
+
+   auto res = std::make_shared<ASTIndexDeclaration>(expr, type, name);
+   res->granularity = granularity;
+
+   return res;
+}
+
+ASTPtr ASTIndexDeclaration::getExpression() const
+{
+   if (children.size() <= expression_idx)
+       throw Exception(ErrorCodes::LOGICAL_ERROR, "Index declaration must have an expression");
+   return children[expression_idx];
+}
+
+std::shared_ptr<ASTFunction> ASTIndexDeclaration::getType() const
+{
+   if (children.size() <= type_idx)
+       return nullptr;
+   auto func_ast = std::dynamic_pointer_cast<ASTFunction>(children[type_idx]);
+   if (!func_ast)
+       throw Exception(ErrorCodes::LOGICAL_ERROR, "Index declaration type must be a function");
+   return func_ast;
+}
+
void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const
{
-   if (expr)
+   if (auto expr = getExpression())
    {
        if (part_of_create_index_query)
        {
@ -46,11 +85,12 @@ void ASTIndexDeclaration::formatImpl(const FormatSettings & s, FormatState & sta
        }
    }

-   if (type)
+   if (auto type = getType())
    {
        s.ostr << (s.hilite ? hilite_keyword : "") << " TYPE " << (s.hilite ? hilite_none : "");
        type->formatImpl(s, state, frame);
    }

    if (granularity)
    {
        s.ostr << (s.hilite ? hilite_keyword : "") << " GRANULARITY " << (s.hilite ? hilite_none : "");
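The ASTIndexDeclaration rewrite above replaces raw expr/type member pointers, which had to be kept in sync through set() and forEachPointerToChild(), with ownership in the node's children vector plus indexed accessors. A simplified sketch of that pattern (a generic Node, not the ClickHouse IAST hierarchy):

    #include <cstddef>
    #include <memory>
    #include <stdexcept>
    #include <vector>

    struct Node
    {
        std::vector<std::shared_ptr<Node>> children;

        static constexpr std::size_t expression_idx = 0;
        static constexpr std::size_t type_idx = 1;

        std::shared_ptr<Node> getExpression() const
        {
            if (children.size() <= expression_idx)
                throw std::logic_error("node must have an expression");
            return children[expression_idx];
        }

        std::shared_ptr<Node> getType() const
        {
            if (children.size() <= type_idx)
                return nullptr; // the type child is optional
            return children[type_idx];
        }
    };

Since the sub-nodes now live in children, generic tree operations (cloning, visiting, formatting) traverse them automatically, and the error checks move into the constructor and the accessors.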
@ -16,9 +16,9 @@ public:
    static const auto DEFAULT_ANNOY_INDEX_GRANULARITY = 100'000'000uz;
    static const auto DEFAULT_USEARCH_INDEX_GRANULARITY = 100'000'000uz;

+   ASTIndexDeclaration(ASTPtr expression, ASTPtr type, const String & name_);
+
    String name;
-   IAST * expr;
-   ASTFunction * type;
    UInt64 granularity;
    bool part_of_create_index_query = false;

@ -28,11 +28,12 @@ public:
    ASTPtr clone() const override;
    void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const override;

-   void forEachPointerToChild(std::function<void(void**)> f) override
-   {
-       f(reinterpret_cast<void **>(&expr));
-       f(reinterpret_cast<void **>(&type));
-   }
+   ASTPtr getExpression() const;
+   std::shared_ptr<ASTFunction> getType() const;
+
+private:
+   static constexpr size_t expression_idx = 0;
+   static constexpr size_t type_idx = 1;
};

}

@ -54,19 +54,18 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected
        return false;
    }

-   auto index = std::make_shared<ASTIndexDeclaration>();
+   /// name is set below in ParserCreateIndexQuery
+   auto index = std::make_shared<ASTIndexDeclaration>(expr, type, "");
    index->part_of_create_index_query = true;
-   index->set(index->expr, expr);
-   if (type)
-       index->set(index->type, type);

    if (granularity)
        index->granularity = granularity->as<ASTLiteral &>().value.safeGet<UInt64>();
    else
    {
-       if (index->type && index->type->name == "annoy")
+       auto index_type = index->getType();
+       if (index_type && index_type->name == "annoy")
            index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY;
-       else if (index->type && index->type->name == "usearch")
+       else if (index_type && index_type->name == "usearch")
            index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY;
        else
            index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY;

@ -205,18 +205,16 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
        return false;
    }

-   auto index = std::make_shared<ASTIndexDeclaration>();
-   index->name = name->as<ASTIdentifier &>().name();
-   index->set(index->expr, expr);
-   index->set(index->type, type);
+   auto index = std::make_shared<ASTIndexDeclaration>(expr, type, name->as<ASTIdentifier &>().name());

    if (granularity)
        index->granularity = granularity->as<ASTLiteral &>().value.safeGet<UInt64>();
    else
    {
-       if (index->type->name == "annoy")
+       auto index_type = index->getType();
+       if (index_type->name == "annoy")
            index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY;
-       else if (index->type->name == "usearch")
+       else if (index_type->name == "usearch")
            index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY;
        else
            index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY;

@ -169,7 +169,7 @@ public:
    {
        const auto & in_first_argument_node = function_node.getArguments().getNodes().at(0);
        const auto & in_second_argument_node = function_node.getArguments().getNodes().at(1);
-       in_function_second_argument_node_name = planner_context.createSetKey(in_first_argument_node->getResultType(), in_second_argument_node);
+       in_function_second_argument_node_name = PlannerContext::createSetKey(in_first_argument_node->getResultType(), in_second_argument_node);
    }

    WriteBufferFromOwnString buffer;

@ -305,7 +305,7 @@ bool applyTrivialCountIfPossible(
    AggregateDataPtr place = state.data();
    agg_count.create(place);
    SCOPE_EXIT_MEMORY_SAFE(agg_count.destroy(place));
-   agg_count.set(place, num_rows.value());
+   AggregateFunctionCount::set(place, num_rows.value());

    auto column = ColumnAggregateFunction::create(function_node.getAggregateFunction());
    column->insertFrom(place);

@ -121,7 +121,7 @@ void ExecutorTasks::pushTasks(Queue & queue, Queue & async_queue, ExecutionThrea

    /// Take local task from queue if has one.
    if (!queue.empty() && !context.hasAsyncTasks()
-       && context.num_scheduled_local_tasks < context.max_scheduled_local_tasks)
+       && context.num_scheduled_local_tasks < ExecutionThreadContext::max_scheduled_local_tasks)
    {
        ++context.num_scheduled_local_tasks;
        context.setTask(queue.front());

@ -82,7 +82,7 @@ public:

    std::string getContentType() const override
    {
-       return writer.getContentType();
+       return NativeWriter::getContentType();
    }

protected:

@ -76,7 +76,7 @@ inline static const Patterns & selectPatternsForMetricType(const Graphite::Param
    if (params.patterns_typed)
    {
        std::string_view path_view = path;
-       if (path_view.find("?"sv) == path_view.npos)
+       if (path_view.find("?"sv) == std::string::npos)
            return params.patterns_plain;
        else
            return params.patterns_tagged;

@ -31,7 +31,7 @@ std::vector<std::pair<String, String>> describeJoinActions(const JoinPtr & join)
    description.emplace_back("ASOF inequality", toString(table_join.getAsofInequality()));

    if (!table_join.getClauses().empty())
-       description.emplace_back("Clauses", table_join.formatClauses(table_join.getClauses(), true /*short_format*/));
+       description.emplace_back("Clauses", TableJoin::formatClauses(table_join.getClauses(), true /*short_format*/));

    return description;
}
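On the npos change above: std::string_view::npos and std::string::npos are both size_type(-1), so the comparison is behaviour-neutral; the static constant is simply no longer accessed through the instance. A sketch:

    #include <cstddef>
    #include <string>
    #include <string_view>

    // npos has the same value on std::string and std::string_view, so a
    // string_view::find() result can be compared against std::string::npos.
    static_assert(std::string_view::npos == std::string::npos);
    static_assert(std::string_view::npos == static_cast<std::size_t>(-1));

    bool isPlainMetric(std::string_view path)
    {
        return path.find('?') == std::string::npos; // no tag separator present
    }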
@ -1408,8 +1408,8 @@ static void buildIndexes(
    if (metadata_snapshot->hasPartitionKey())
    {
        const auto & partition_key = metadata_snapshot->getPartitionKey();
-       auto minmax_columns_names = data.getMinMaxColumnsNames(partition_key);
-       auto minmax_expression_actions = data.getMinMaxExpr(partition_key, ExpressionActionsSettings::fromContext(context));
+       auto minmax_columns_names = MergeTreeData::getMinMaxColumnsNames(partition_key);
+       auto minmax_expression_actions = MergeTreeData::getMinMaxExpr(partition_key, ExpressionActionsSettings::fromContext(context));

        indexes->minmax_idx_condition.emplace(filter_actions_dag, context, minmax_columns_names, minmax_expression_actions);
        indexes->partition_pruner.emplace(metadata_snapshot, filter_actions_dag, context, false /* strict */);

@ -77,7 +77,7 @@ void readHeaders(
        skipToNextLineOrEOF(in);

        Poco::trimRightInPlace(value);
-       headers.add(name, headers.decodeWord(value));
+       headers.add(name, Poco::Net::MessageHeader::decodeWord(value));
        ++fields;
    }
}

@ -1143,7 +1143,7 @@ void AlterCommands::apply(StorageInMemoryMetadata & metadata, ContextPtr context
    {
        auto minmax_columns = metadata_copy.getColumnsRequiredForPartitionKey();
        auto partition_key = metadata_copy.partition_key.expression_list_ast->clone();
-       FunctionNameNormalizer().visit(partition_key.get());
+       FunctionNameNormalizer::visit(partition_key.get());
        auto primary_key_asts = metadata_copy.primary_key.expression_list_ast->children;
        metadata_copy.minmax_count_projection.emplace(ProjectionDescription::getMinMaxCountProjection(
            metadata_copy.columns, partition_key, minmax_columns, primary_key_asts, context));

@ -613,7 +613,7 @@ IColumn::Selector DistributedSink::createSelector(const Block & source_block) co

    const auto & key_column = current_block_with_sharding_key_expr.getByName(storage.getShardingKeyColumnName());

-   return storage.createSelector(cluster, key_column);
+   return StorageDistributed::createSelector(cluster, key_column);
}

@ -177,7 +177,7 @@ private:
    };
    std::shared_ptr<TaskContext> task;

-   std::unique_ptr<FileLogDirectoryWatcher> directory_watch = nullptr;
+   std::unique_ptr<FileLogDirectoryWatcher> directory_watch;

    void loadFiles();

@ -85,22 +85,23 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
    if (index_definition->name.empty())
        throw Exception(ErrorCodes::INCORRECT_QUERY, "Skip index must have name in definition.");

-   if (!index_definition->type)
+   auto index_type = index_definition->getType();
+   if (!index_type)
        throw Exception(ErrorCodes::INCORRECT_QUERY, "TYPE is required for index");

-   if (index_definition->type->parameters && !index_definition->type->parameters->children.empty())
+   if (index_type->parameters && !index_type->parameters->children.empty())
        throw Exception(ErrorCodes::INCORRECT_QUERY, "Index type cannot have parameters");

    IndexDescription result;
    result.definition_ast = index_definition->clone();
    result.name = index_definition->name;
-   result.type = Poco::toLower(index_definition->type->name);
+   result.type = Poco::toLower(index_type->name);
    result.granularity = index_definition->granularity;

    ASTPtr expr_list;
-   if (index_definition->expr)
+   if (auto index_expression = index_definition->getExpression())
    {
-       expr_list = extractKeyExpressionList(index_definition->expr->clone());
+       expr_list = extractKeyExpressionList(index_expression);

        ReplaceAliasToExprVisitor::Data data{columns};
        ReplaceAliasToExprVisitor{data}.visit(expr_list);
@ -125,12 +126,11 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
        result.data_types.push_back(elem.type);
    }

-   const auto & definition_arguments = index_definition->type->arguments;
-   if (definition_arguments)
+   if (index_type && index_type->arguments)
    {
-       for (size_t i = 0; i < definition_arguments->children.size(); ++i)
+       for (size_t i = 0; i < index_type->arguments->children.size(); ++i)
        {
-           const auto * argument = definition_arguments->children[i]->as<ASTLiteral>();
+           const auto * argument = index_type->arguments->children[i]->as<ASTLiteral>();
            if (!argument)
                throw Exception(ErrorCodes::INCORRECT_QUERY, "Only literals can be skip index arguments");
            result.arguments.emplace_back(argument->value);
Some files were not shown because too many files have changed in this diff.