Merge branch 'master' into keeper-prometheus

Antonio Andelic 2022-11-24 12:12:08 +01:00 committed by GitHub
commit a6f38cb1cc
256 changed files with 5644 additions and 1069 deletions


@ -442,8 +442,9 @@ elseif (OS_DARWIN)
include(cmake/darwin/default_libs.cmake)
elseif (OS_FREEBSD)
include(cmake/freebsd/default_libs.cmake)
else()
link_libraries(global-group)
endif ()
link_libraries(global-group)
if (NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
@ -592,7 +593,7 @@ add_subdirectory (programs)
add_subdirectory (tests)
add_subdirectory (utils)
include (cmake/sanitize_target_link_libraries.cmake)
include (cmake/sanitize_targets.cmake)
# Build native targets if necessary
get_property(NATIVE_BUILD_TARGETS GLOBAL PROPERTY NATIVE_BUILD_TARGETS)


@ -220,13 +220,13 @@ struct statx {
uint32_t stx_dev_minor;
uint64_t spare[14];
};
#endif
int statx(int fd, const char *restrict path, int flag,
unsigned int mask, struct statx *restrict statxbuf)
{
return syscall(SYS_statx, fd, path, flag, mask, statxbuf);
}
#endif
#include <syscall.h>


@ -23,6 +23,7 @@ set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
include (cmake/cxx.cmake)
link_libraries(global-group)
target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>


@ -24,6 +24,7 @@ find_package(Threads REQUIRED)
include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
link_libraries(global-group)
target_link_libraries(global-group INTERFACE
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>


@ -34,6 +34,13 @@ set(CMAKE_C_STANDARD_LIBRARIES ${DEFAULT_LIBS})
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
# Delay linking the global interface until after the libc++ libraries are included, to avoid circular dependencies
# which are OK with static libraries but not with dynamic ones
link_libraries(global-group)
if (NOT OS_ANDROID)
if (NOT USE_MUSL)
# Our compatibility layer doesn't build under Android, many errors in musl.
@ -42,9 +49,6 @@ if (NOT OS_ANDROID)
add_subdirectory(base/harmful)
endif ()
include (cmake/unwind.cmake)
include (cmake/cxx.cmake)
target_link_libraries(global-group INTERFACE
-Wl,--start-group
$<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>


@ -1,3 +1,13 @@
# https://stackoverflow.com/a/62311397/328260
macro (get_all_targets_recursive targets dir)
get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
foreach (subdir ${subdirectories})
get_all_targets_recursive (${targets} ${subdir})
endforeach ()
get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
list (APPEND ${targets} ${current_targets})
endmacro ()
# When you try to link a target with a directory (that exists), cmake will
# silently skip it; only the following warning will be reported:
#
@ -18,23 +28,12 @@
# -- but cannot be used with link_libraries()
# - use BUILDSYSTEM_TARGETS property to get list of all targets and sanitize
# -- this will work.
# https://stackoverflow.com/a/62311397/328260
function (get_all_targets var)
set (targets)
get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR})
set (${var} ${targets} PARENT_SCOPE)
endfunction()
macro (get_all_targets_recursive targets dir)
get_property (subdirectories DIRECTORY ${dir} PROPERTY SUBDIRECTORIES)
foreach (subdir ${subdirectories})
get_all_targets_recursive (${targets} ${subdir})
endforeach ()
get_property (current_targets DIRECTORY ${dir} PROPERTY BUILDSYSTEM_TARGETS)
list (APPEND ${targets} ${current_targets})
endmacro ()
macro (sanitize_link_libraries target)
function (sanitize_link_libraries target)
get_target_property(target_type ${target} TYPE)
if (${target_type} STREQUAL "INTERFACE_LIBRARY")
get_property(linked_libraries TARGET ${target} PROPERTY INTERFACE_LINK_LIBRARIES)
@ -48,9 +47,35 @@ macro (sanitize_link_libraries target)
message(FATAL_ERROR "${target} requested to link with directory: ${linked_library}")
endif()
endforeach()
endmacro()
endfunction()
get_all_targets (all_targets)
foreach (target ${all_targets})
sanitize_link_libraries(${target})
endforeach()
#
# Do not allow contrib to define -W* flags publicly (INTERFACE/PUBLIC).
#
function (get_contrib_targets var)
set (targets)
get_all_targets_recursive (targets ${CMAKE_CURRENT_SOURCE_DIR}/contrib)
set (${var} ${targets} PARENT_SCOPE)
endfunction()
function (sanitize_interface_flags target)
get_target_property(target_type ${target} TYPE)
get_property(compile_definitions TARGET ${target} PROPERTY INTERFACE_COMPILE_DEFINITIONS)
get_property(compile_options TARGET ${target} PROPERTY INTERFACE_COMPILE_OPTIONS)
if (NOT "${compile_options}" STREQUAL "")
message(FATAL_ERROR "${target} set INTERFACE_COMPILE_OPTIONS to ${compile_options}. This is forbidden.")
endif()
if ("${compile_definitions}" MATCHES "-Wl,")
# linker option - OK
elseif ("${compile_definitions}" MATCHES "-W")
message(FATAL_ERROR "${target} contains ${compile_definitions} flags in INTERFACE_COMPILE_DEFINITIONS. This is forbidden.")
endif()
endfunction()
get_contrib_targets (contrib_targets)
foreach (contrib_target ${contrib_targets})
sanitize_interface_flags(${contrib_target})
endforeach()


@ -57,7 +57,7 @@ add_library(cxx ${SRCS})
set_target_properties(cxx PROPERTIES FOLDER "contrib/libcxx-cmake")
target_include_directories(cxx SYSTEM BEFORE PRIVATE $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/src>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>)
target_include_directories(cxx SYSTEM BEFORE PUBLIC $<$<COMPILE_LANGUAGE:CXX>:$<BUILD_INTERFACE:${LIBCXX_SOURCE_DIR}/include>>)
target_compile_definitions(cxx PRIVATE -D_LIBCPP_BUILDING_LIBRARY -DLIBCXX_BUILDING_LIBCXXABI)
# Enable capturing stack traces for all exceptions.


@ -451,6 +451,7 @@ else
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'")
# NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected
# ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part")
# FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 - bad mutation does not indicate backward incompatibility
echo "Check for Error messages in server log:"
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
@ -485,6 +486,7 @@ else
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
-e "Code: 269. DB::Exception: Destination table is myself" \
-e "Coordination::Exception: Connection loss" \
-e "MutateFromLogEntryTask" \
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv


@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.8.9.24-lts (a1b69551d40) FIXME as compared to v22.8.8.3-lts (ac5a6cababc)
#### Performance Improvement
* Backported in [#43012](https://github.com/ClickHouse/ClickHouse/issues/43012): Keeper performance improvement: improve commit performance for cases when many different nodes have uncommitted states. This should help with cases when a follower node can't sync fast enough. [#42926](https://github.com/ClickHouse/ClickHouse/pull/42926) ([Antonio Andelic](https://github.com/antonio2368)).
#### Improvement
* Backported in [#42840](https://github.com/ClickHouse/ClickHouse/issues/42840): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Build/Testing/Packaging Improvement
* Backported in [#42964](https://github.com/ClickHouse/ClickHouse/issues/42964): Before the fix, the user-defined config was preserved by RPM in `$file.rpmsave`. The PR fixes it and won't replace the user's files from packages. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43040](https://github.com/ClickHouse/ClickHouse/issues/43040): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches other than master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#42720](https://github.com/ClickHouse/ClickHouse/issues/42720): Fixed `Unknown identifier (aggregate-function)` exception which appears when a user tries to calculate WINDOW ORDER BY/PARTITION BY expressions over aggregate functions: ``` CREATE TABLE default.tenk1 ( `unique1` Int32, `unique2` Int32, `ten` Int32 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192; SELECT ten, sum(unique1) + sum(unique2) AS res, rank() OVER (ORDER BY sum(unique1) + sum(unique2) ASC) AS rank FROM _complex GROUP BY ten ORDER BY ten ASC; ``` which gives: ``` Code: 47. DB::Exception: Received from localhost:9000. DB::Exception: Unknown identifier: sum(unique1); there are columns: unique1, unique2, ten: While processing sum(unique1) + sum(unique2) ASC. (UNKNOWN_IDENTIFIER) ```. [#39762](https://github.com/ClickHouse/ClickHouse/pull/39762) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Backported in [#42748](https://github.com/ClickHouse/ClickHouse/issues/42748): A segmentation fault related to DNS & c-ares has been reported. The below error ocurred in multiple threads: ``` 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008088 [ 356 ] {} <Fatal> BaseDaemon: ######################################## 2022-09-28 15:41:19.008,"2022.09.28 15:41:19.008147 [ 356 ] {} <Fatal> BaseDaemon: (version 22.8.5.29 (official build), build id: 92504ACA0B8E2267) (from thread 353) (no query) Received signal Segmentation fault (11)" 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008196 [ 356 ] {} <Fatal> BaseDaemon: Address: 0xf Access: write. Address not mapped to object. 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008216 [ 356 ] {} <Fatal> BaseDaemon: Stack trace: 0x188f8212 0x1626851b 0x1626a69e 0x16269b3f 0x16267eab 0x13cf8284 0x13d24afc 0x13c5217e 0x14ec2495 0x15ba440f 0x15b9d13b 0x15bb2699 0x1891ccb3 0x1891e00d 0x18ae0769 0x18ade022 0x7f76aa985609 0x7f76aa8aa133 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008274 [ 356 ] {} <Fatal> BaseDaemon: 2. Poco::Net::IPAddress::family() const @ 0x188f8212 in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008297 [ 356 ] {} <Fatal> BaseDaemon: 3. ? @ 0x1626851b in /usr/bin/clickhouse 2022-09-28 15:41:19.008,2022.09.28 15:41:19.008309 [ 356 ] {} <Fatal> BaseDaemon: 4. ? @ 0x1626a69e in /usr/bin/clickhouse ```. [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43062](https://github.com/ClickHouse/ClickHouse/issues/43062): Fix rare NOT_FOUND_COLUMN_IN_BLOCK error when projection is possible to use but there is no projection available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771) . The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Do not warn about kvm-clock [#41217](https://github.com/ClickHouse/ClickHouse/pull/41217) ([Sergei Trifonov](https://github.com/serxa)).
* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@ -13,7 +13,7 @@ The supported formats are:
| Format | Input | Output |
|-------------------------------------------------------------------------------------------|------|--------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
@ -48,6 +48,7 @@ The supported formats are:
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
@ -1210,6 +1211,69 @@ SELECT * FROM json_each_row_nested
- [output_format_json_array_of_rows](../operations/settings/settings.md#output_format_json_array_of_rows) - output a JSON array of all rows in JSONEachRow(Compact) format. Default value - `false`.
- [output_format_json_validate_utf8](../operations/settings/settings.md#output_format_json_validate_utf8) - enables validation of UTF-8 sequences in JSON output formats (note that it doesn't impact formats JSON/JSONCompact/JSONColumnsWithMetadata, they always validate utf8). Default value - `false`.
## BSONEachRow {#bsoneachrow}
In this format, ClickHouse formats/parses data as a sequence of BSON documents without any separator between them.
Each row is formatted as a single document and each column is formatted as a single BSON document field with the column name as the key.
For output it uses the following correspondence between ClickHouse types and BSON types:
| ClickHouse type | BSON Type |
|-----------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------|
| [Bool](../sql-reference/data-types/boolean.md) | `\x08` boolean |
| [Int8/UInt8](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int16/UInt16](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [Int32](../sql-reference/data-types/int-uint.md) | `\x10` int32 |
| [UInt32](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Int64/UInt64](../sql-reference/data-types/int-uint.md) | `\x12` int64 |
| [Float32/Float64](../sql-reference/data-types/float.md) | `\x01` double |
| [Date](../sql-reference/data-types/date.md)/[Date32](../sql-reference/data-types/date32.md) | `\x10` int32 |
| [DateTime](../sql-reference/data-types/datetime.md) | `\x12` int64 |
| [DateTime64](../sql-reference/data-types/datetime64.md) | `\x09` datetime |
| [Decimal32](../sql-reference/data-types/decimal.md) | `\x10` int32 |
| [Decimal64](../sql-reference/data-types/decimal.md) | `\x12` int64 |
| [Decimal128](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Decimal256](../sql-reference/data-types/decimal.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [Int128/UInt128](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 16 |
| [Int256/UInt256](../sql-reference/data-types/int-uint.md) | `\x05` binary, `\x00` binary subtype, size = 32 |
| [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) | `\x05` binary, `\x00` binary subtype or `\x02` string if setting `output_format_bson_string_as_string` is enabled |
| [UUID](../sql-reference/data-types/uuid.md) | `\x05` binary, `\x04` uuid subtype, size = 16 |
| [Array](../sql-reference/data-types/array.md) | `\x04` array |
| [Tuple](../sql-reference/data-types/tuple.md) | `\x04` array |
| [Named Tuple](../sql-reference/data-types/tuple.md) | `\x03` document |
| [Map](../sql-reference/data-types/map.md) (with String keys) | `\x03` document |
For input it uses the following correspondence between BSON types and ClickHouse types:
| BSON Type | ClickHouse Type |
|------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `\x01` double | [Float32/Float64](../sql-reference/data-types/float.md) |
| `\x02` string | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](../sql-reference/data-types/map.md)/[Named Tuple](../sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](../sql-reference/data-types/array.md)/[Tuple](../sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x02` old binary subtype | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](../sql-reference/data-types/uuid.md) |
| `\x07` ObjectId | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x08` boolean | [Bool](../sql-reference/data-types/boolean.md) |
| `\x09` datetime | [DateTime64](../sql-reference/data-types/datetime64.md) |
| `\x0A` null value | [NULL](../sql-reference/data-types/nullable.md) |
| `\x0D` JavaScript code | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x0E` symbol | [String](../sql-reference/data-types/string.md)/[FixedString](../sql-reference/data-types/fixedstring.md) |
| `\x10` int32 | [Int32/UInt32](../sql-reference/data-types/int-uint.md)/[Decimal32](../sql-reference/data-types/decimal.md) |
| `\x12` int64 | [Int64/UInt64](../sql-reference/data-types/int-uint.md)/[Decimal64](../sql-reference/data-types/decimal.md)/[DateTime64](../sql-reference/data-types/datetime64.md) |
Other BSON types are not supported. Also, it converts between different integer types (for example, you can insert a BSON int32 value into a ClickHouse UInt8 column).
Big integers and decimals (Int128/UInt128/Int256/UInt256/Decimal128/Decimal256) can be parsed from a BSON Binary value with the `\x00` binary subtype. In this case, the format validates that the size of the binary data equals the size of the expected value.
Note: this format doesn't work properly on big-endian platforms.
### BSON format settings {#bson-format-settings}
- [output_format_bson_string_as_string](../operations/settings/settings.md#output_format_bson_string_as_string) - use BSON String type instead of Binary for String columns. Default value - `false`.
- [input_format_bson_skip_fields_with_unsupported_types_in_schema_inference](../operations/settings/settings.md#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types during schema inference for the BSONEachRow format. Default value - `false`.
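For illustration, a minimal round trip with a throwaway table (the table name and values are arbitrary):
```sql
CREATE TABLE bson_demo (id UInt32, name String) ENGINE = Memory;

INSERT INTO bson_demo VALUES (1, 'a'), (2, 'b');

-- Each result row is emitted as one BSON document; String columns are written
-- as BSON binary unless output_format_bson_string_as_string is enabled.
SELECT * FROM bson_demo FORMAT BSONEachRow;
```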
## Native {#native}
The most efficient format. Data is written and read by blocks in binary format. For each block, the number of rows, number of columns, column names and types, and parts of columns in this block are recorded one after another. In other words, this format is “columnar”: it does not convert columns to rows. This is the format used in the native interface for interaction between servers, for using the command-line client, and for C++ clients.


@ -57,7 +57,7 @@ Internal coordination settings are located in the `<keeper_server>.<coordination
- `auto_forwarding` — Allow forwarding write requests from followers to the leader (default: true).
- `shutdown_timeout` — Wait for internal connections to finish and shut down (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants within the specified timeout, it will terminate (ms) (default: 30000).
- `four_letter_word_white_list` — White list of 4lw commands (default: `conf,cons,crst,envi,ruok,srst,srvr,stat,wchc,wchs,dirs,mntr,isro`).
- `four_letter_word_white_list` — White list of 4lw commands (default: `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`).
Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers.
@ -126,7 +126,7 @@ clickhouse keeper --config /etc/your_path_to_config/config.xml
ClickHouse Keeper also provides 4lw commands which are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on the server and connections respectively.
The 4lw commands have a white list configuration `four_letter_word_white_list` which has the default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif`.
The 4lw commands have a white list configuration `four_letter_word_white_list` which has the default value `conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld`.
You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.
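For example, a minimal check with `nc`, assuming Keeper is running locally and listening on the default client port 9181:
```bash
# Query general server statistics from a locally running Keeper
# (the host and port here are assumptions, adjust for your setup).
echo mntr | nc localhost 9181
```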
@ -328,6 +328,12 @@ target_committed_log_idx 101
last_snapshot_idx 50
```
- `rqld`: Request to become the new leader. Returns `Sent leadership request to leader.` if the request was sent, or `Failed to send leadership request to leader.` if it was not. Note that if the node is already the leader, the outcome is the same as if the request had been sent.
```
Sent leadership request to leader.
```
## Migration from ZooKeeper {#migration-from-zookeeper}
Seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you have to stop your ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:
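As a sketch of the conversion step (the directories below are illustrative and must point at your actual ZooKeeper data and the target Keeper snapshot directory):
```bash
# Convert existing ZooKeeper logs and snapshots into a ClickHouse Keeper snapshot.
# The paths are examples only.
clickhouse-keeper-converter \
    --zookeeper-logs-dir /var/lib/zookeeper/version-2 \
    --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 \
    --output-dir /var/lib/clickhouse/coordination/snapshots
```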


@ -4784,7 +4784,7 @@ Possible values:
Default value: 1.
## SQLInsert format settings {$sqlinsert-format-settings}
## SQLInsert format settings {#sqlinsert-format-settings}
### output_format_sql_insert_max_batch_size {#output_format_sql_insert_max_batch_size}
@ -4815,3 +4815,17 @@ Default value: `false`.
Quote column names with "`" characters
Default value: `true`.
## BSONEachRow format settings {#bson-each-row-format-settings}
### output_format_bson_string_as_string {#output_format_bson_string_as_string}
Use BSON String type instead of Binary for String columns.
Disabled by default.
### input_format_bson_skip_fields_with_unsupported_types_in_schema_inference {#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference}
Allow skipping columns with unsupported types during schema inference for the BSONEachRow format.
Disabled by default.
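For example (a sketch; the query and values are arbitrary), these settings can be applied per query or per session:
```sql
-- Emit String columns as BSON string (\x02) instead of binary (\x05).
SELECT 'hello' AS s
SETTINGS output_format_bson_string_as_string = 1
FORMAT BSONEachRow;

-- Or enable both settings for the current session.
SET output_format_bson_string_as_string = 1;
SET input_format_bson_skip_fields_with_unsupported_types_in_schema_inference = 1;
```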


@ -11,6 +11,14 @@ Projections store data in a format that optimizes query execution, this feature
You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.
:::note Disk usage
Projections internally create a new hidden table, which means that more I/O and disk space will be required.
For example, if the projection defines a different primary key, all the data from the original table will be duplicated.
:::
You can see more technical details about how projections work internally on this [page](/docs/en/guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-multiple.md/#option-3-projections).
## Example filtering without using primary keys
Creating the table:


@ -60,7 +60,7 @@ If you specify `POPULATE`, the existing table data is inserted into the view whe
A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won't be further aggregated. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.
The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
The execution of [ALTER](/docs/en/sql-reference/statements/alter/view.md) queries on materialized views has limitations; for example, you cannot update the `SELECT` query, so this might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
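A sketch of that workaround (the `src`, `dest`, and `mv` names are illustrative):
```sql
CREATE TABLE src (key UInt32, value UInt64) ENGINE = MergeTree ORDER BY key;
CREATE TABLE dest (key UInt32, total UInt64) ENGINE = SummingMergeTree ORDER BY key;

-- The materialized view writes into an explicit target table via TO.
CREATE MATERIALIZED VIEW mv TO dest AS
SELECT key, sum(value) AS total FROM src GROUP BY key;

-- Workaround: detach the view, alter the target table, re-attach the view.
DETACH TABLE mv;
ALTER TABLE dest ADD COLUMN extra UInt64 DEFAULT 0;
ATTACH TABLE mv;
```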
Note that the materialized view is influenced by the [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged before insertion into the view.


@ -47,6 +47,7 @@ Union
- `AST` — Abstract syntax tree.
- `SYNTAX` — Query text after AST-level optimizations.
- `QUERY TREE` — Query tree after Query Tree level optimizations.
- `PLAN` — Query execution plan.
- `PIPELINE` — Query execution pipeline.
@ -110,6 +111,32 @@ FROM
CROSS JOIN system.numbers AS c
```
### EXPLAIN QUERY TREE
Settings:
- `run_passes` — Run all query tree passes before dumping the query tree. Default: `1`.
- `dump_passes` — Dump information about used passes before dumping the query tree. Default: `0`.
- `passes` — Specifies how many passes to run. If set to `-1`, runs all the passes. Default: `-1`.
Example:
```sql
EXPLAIN QUERY TREE SELECT id, value FROM test_table;
```
```
QUERY id: 0
PROJECTION COLUMNS
id UInt64
value String
PROJECTION
LIST id: 1, nodes: 2
COLUMN id: 2, column_name: id, result_type: UInt64, source_id: 3
COLUMN id: 4, column_name: value, result_type: String, source_id: 3
JOIN TREE
TABLE id: 3, table_name: default.test_table
```
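The settings above can also be passed inline in the `EXPLAIN` statement; a minimal sketch, reusing the `test_table` from the previous example:
```sql
-- Dump the query tree before any passes are applied.
EXPLAIN QUERY TREE run_passes = 0 SELECT id, value FROM test_table;
```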
### EXPLAIN PLAN
Dump query plan steps.


@ -243,6 +243,54 @@ If `max_rows_to_group_by` and `group_by_overflow_mode = 'any'` are not used, all
You can use `WITH TOTALS` in subqueries, including subqueries in the [JOIN](../../../sql-reference/statements/select/join.md) clause (in this case, the respective total values are combined).
## GROUP BY ALL
`GROUP BY ALL` is equivalent to listing all the SELECT-ed expressions that are not aggregate functions.
For example:
``` sql
SELECT
a * 2,
b,
count(c)
FROM t
GROUP BY ALL
```
is the same as
``` sql
SELECT
a * 2,
b,
count(c)
FROM t
GROUP BY a * 2, b
```
As a special case, if a function has both aggregate functions and other fields as its arguments, the `GROUP BY` keys will contain the maximum non-aggregate fields that can be extracted from it.
For example:
``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY ALL
```
is the same as
``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY substring(a, 4, 2), substring(a, 1, 2)
```
## Examples
Example:


@ -0,0 +1,75 @@
---
slug: /en/sql-reference/table-functions/format
sidebar_position: 56
sidebar_label: format
---
# format
Extracts the table structure from the data and parses it according to the specified input format.
**Syntax**
``` sql
format(format_name, data)
```
**Parameters**
- `format_name` — The [format](../../interfaces/formats.md#formats) of the data.
- `data` — String literal or constant expression that returns a string containing data in the specified format.
**Returned value**
A table with data parsed from the `data` argument according to the specified format, using the extracted schema.
**Examples**
**Query:**
``` sql
:) select * from format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌───b─┬─a─────┐
│ 111 │ Hello │
│ 123 │ World │
│ 112 │ Hello │
│ 124 │ World │
└─────┴───────┘
```
**Query:**
```sql
:) desc format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ b │ Nullable(Float64) │ │ │ │ │ │
│ a │ Nullable(String) │ │ │ │ │ │
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
**See Also**
- [Formats](../../interfaces/formats.md)
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/format) <!--hide-->


@ -98,7 +98,7 @@ ClickHouse предоставляет возможность аутентифи
:::danger "Важно"
Если пользователь настроен для Kerberos-аутентификации, другие виды уатентификации будут для него недоступны. Если наряду с `kerberos` в определении пользователя будет указан какой-либо другой способ аутентификации, ClickHouse завершит работу.
Если пользователь настроен для Kerberos-аутентификации, другие виды аутентификации будут для него недоступны. Если наряду с `kerberos` в определении пользователя будет указан какой-либо другой способ аутентификации, ClickHouse завершит работу.
:::info ""
Ещё раз отметим, что кроме `users.xml`, необходимо также включить Kerberos в `config.xml`.


@ -0,0 +1 @@
../../../en/sql-reference/table-functions/format.md


@ -74,7 +74,7 @@ Kafka 特性:
消费的消息会被自动追踪,因此每个消息在不同的消费组里只会记录一次。如果希望获得两次数据,则使用另一个组名创建副本。
消费组可以灵活配置并且在集群之间同步。例如如果群集中有10个主题和5个表副本则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 http://kafka.apache.org/intro。
消费组可以灵活配置并且在集群之间同步。例如如果群集中有10个主题和5个表副本则每个副本将获得2个主题。 如果副本数量发生变化,主题将自动在副本中重新分配。了解更多信息请访问 [http://kafka.apache.org/intro](http://kafka.apache.org/intro)
`SELECT` 查询对于读取消息并不是很有用(调试除外),因为每条消息只能被读取一次。使用物化视图创建实时线程更实用。您可以这样做:


@ -164,7 +164,7 @@ SETTINGS index_granularity = 8192, index_granularity_bytes = 0;
<li><font face = "monospace">index_granularity</font>: 显式设置为其默认值8192。这意味着对于每一组8192行主索引将有一个索引条目例如如果表包含16384行那么索引将有两个索引条目。
</li>
<br/>
<li><font face = "monospace">index_granularity_bytes</font>: 设置为0表示禁止<a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">适应索引粒度</font></a>。自适应索引粒度意味着ClickHouse自动为一组n行创建一个索引条目
<li><font face = "monospace">index_granularity_bytes</font>: 设置为0表示禁止<a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">适应索引粒度</font></a>。自适应索引粒度意味着ClickHouse自动为一组n行创建一个索引条目
<ul>
<li>如果n小于8192但n行的合并行数据大小大于或等于10MB (index_granularity_bytes的默认值)或</li>
<li>n达到8192</li>


@ -77,6 +77,54 @@ sidebar_label: GROUP BY
您可以使用 `WITH TOTALS` 在子查询中,包括在子查询 [JOIN](../../../sql-reference/statements/select/join.md) 子句(在这种情况下,将各自的总值合并)。
## GROUP BY ALL {#group-by-all}
`GROUP BY ALL` 相当于对所有被查询的并且不被聚合函数使用的字段进行`GROUP BY`。
例如
``` sql
SELECT
a * 2,
b,
count(c)
FROM t
GROUP BY ALL
```
效果等同于
``` sql
SELECT
a * 2,
b,
count(c)
FROM t
GROUP BY a * 2, b
```
对于一种特殊情况,如果一个 function 的参数中同时有聚合函数和其他字段,会对参数中能提取的最大非聚合字段进行`GROUP BY`。
例如:
``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY ALL
```
效果等同于
``` sql
SELECT
substring(a, 4, 2),
substring(substring(a, 1, 2), 1, count(b))
FROM t
GROUP BY substring(a, 4, 2), substring(a, 1, 2)
```
## 例子 {#examples}
示例:


@ -0,0 +1 @@
../../../en/sql-reference/table-functions/format.md


@ -9,7 +9,10 @@ After=time-sync.target network-online.target
Wants=time-sync.target
[Service]
Type=simple
Type=notify
# Switching off watchdog is very important for sd_notify to work correctly.
Environment=CLICKHOUSE_WATCHDOG_ENABLE=0
User=clickhouse
Group=clickhouse
Restart=always


@ -160,7 +160,7 @@ void ClusterCopierApp::mainImpl()
registerTableFunctions();
registerStorages();
registerDictionaries();
registerDisks();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();
static const std::string default_database = "_local";


@ -80,8 +80,8 @@ require (
go.opentelemetry.io/otel v1.4.1 // indirect
go.opentelemetry.io/otel/trace v1.4.1 // indirect
golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/text v0.3.8 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect


@ -1078,8 +1078,8 @@ golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1089,8 +1089,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=


@ -176,7 +176,7 @@ int DisksApp::main(const std::vector<String> & /*args*/)
Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level));
}
registerDisks();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();
shared_context = Context::createShared();


@ -330,6 +330,7 @@ HTTPContextPtr Keeper::httpContext()
}
int Keeper::main(const std::vector<std::string> & /*args*/)
try
{
Poco::Logger * log = &logger();
@ -584,6 +585,12 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
return Application::EXIT_OK;
}
catch (...)
{
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
throw;
}
void Keeper::logRevision() const


@ -413,7 +413,7 @@ try
registerTableFunctions();
registerStorages();
registerDictionaries();
registerDisks();
registerDisks(/* global_skip_access_check= */ true);
registerFormats();
processConfig();


@ -99,6 +99,10 @@
#include "config_version.h"
#if defined(OS_LINUX)
# include <cstddef>
# include <cstdlib>
# include <sys/socket.h>
# include <sys/un.h>
# include <sys/mman.h>
# include <sys/ptrace.h>
# include <Common/hasLinuxCapability.h>
@ -273,6 +277,7 @@ namespace ErrorCodes
extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA;
extern const int NETWORK_ERROR;
extern const int CORRUPTED_DATA;
extern const int SYSTEM_ERROR;
}
@ -646,7 +651,53 @@ static void sanityChecks(Server & server)
}
}
#if defined(OS_LINUX)
/// Sends notification to systemd, analogous to sd_notify from libsystemd
static void systemdNotify(const std::string_view & command)
{
const char * path = getenv("NOTIFY_SOCKET"); // NOLINT(concurrency-mt-unsafe)
if (path == nullptr)
return; /// not using systemd
int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
if (s == -1)
throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);
SCOPE_EXIT({ close(s); });
const size_t len = strlen(path);
struct sockaddr_un addr;
addr.sun_family = AF_UNIX;
if (len < 2 || len > sizeof(addr.sun_path) - 1)
throw Exception(ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
memcpy(addr.sun_path, path, len + 1); /// write last zero as well.
size_t addrlen = offsetof(struct sockaddr_un, sun_path) + len;
/// '@' means this is a Linux abstract socket; per the documentation, sun_path[0] must be set to '\0' for it.
if (path[0] == '@')
addr.sun_path[0] = 0;
else if (path[0] == '/')
addrlen += 1; /// non-abstract-addresses should be zero terminated.
else
throw Exception(ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
const struct sockaddr *sock_addr = reinterpret_cast <const struct sockaddr *>(&addr);
if (sendto(s, command.data(), command.size(), 0, sock_addr, static_cast <socklen_t>(addrlen)) != static_cast <ssize_t>(command.size()))
throw Exception("Failed to notify systemd.", ErrorCodes::SYSTEM_ERROR);
}
#endif
int Server::main(const std::vector<std::string> & /*args*/)
try
{
Poco::Logger * log = &logger();
@ -679,7 +730,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
registerTableFunctions();
registerStorages();
registerDictionaries();
registerDisks();
registerDisks(/* global_skip_access_check= */ false);
registerFormats();
registerRemoteFileMetadatas();
@ -1148,6 +1199,9 @@ int Server::main(const std::vector<std::string> & /*args*/)
total_memory_tracker.setDescription("(total)");
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
bool allow_use_jemalloc_memory = config->getBool("allow_use_jemalloc_memory", true);
total_memory_tracker.setAllowUseJemallocMemory(allow_use_jemalloc_memory);
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
total_memory_tracker.setOvercommitTracker(global_overcommit_tracker);
@ -1776,6 +1830,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
tryLogCurrentException(log, "Caught exception while starting cluster discovery");
}
#if defined(OS_LINUX)
systemdNotify("READY=1\n");
#endif
SCOPE_EXIT_SAFE({
LOG_DEBUG(log, "Received termination signal.");
@ -1845,6 +1903,12 @@ int Server::main(const std::vector<std::string> & /*args*/)
return Application::EXIT_OK;
}
catch (...)
{
/// Poco does not provide stacktrace.
tryLogCurrentException("Application");
throw;
}
std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
const Poco::Util::AbstractConfiguration & config,


@ -130,8 +130,8 @@ enum class AccessType
M(SHOW_ROW_POLICIES, "SHOW POLICIES, SHOW CREATE ROW POLICY, SHOW CREATE POLICY", TABLE, SHOW_ACCESS) \
M(SHOW_QUOTAS, "SHOW CREATE QUOTA", GLOBAL, SHOW_ACCESS) \
M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GLOBAL, SHOW_ACCESS) \
M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", GROUP, ACCESS_MANAGEMENT) \
M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
\
M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \


@ -2,6 +2,8 @@
#include <Access/LDAPClient.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <Common/SipHash.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <boost/algorithm/string/case_conv.hpp>
@ -73,6 +75,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
const bool has_tls_ca_cert_file = config.has(ldap_server_config + ".tls_ca_cert_file");
const bool has_tls_ca_cert_dir = config.has(ldap_server_config + ".tls_ca_cert_dir");
const bool has_tls_cipher_suite = config.has(ldap_server_config + ".tls_cipher_suite");
const bool has_search_limit = config.has(ldap_server_config + ".search_limit");
if (!has_host)
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
@ -91,8 +94,8 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
}
else if (has_auth_dn_prefix || has_auth_dn_suffix)
{
const auto auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
const auto auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
std::string auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
std::string auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
params.bind_dn = auth_dn_prefix + "{user_name}" + auth_dn_suffix;
}
@ -176,14 +179,17 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
if (has_port)
{
const auto port = config.getInt64(ldap_server_config + ".port");
if (port < 0 || port > 65535)
UInt32 port = config.getUInt(ldap_server_config + ".port");
if (port > 65535)
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
params.port = port;
}
else
params.port = (params.enable_tls == LDAPClient::Params::TLSEnable::YES ? 636 : 389);
if (has_search_limit)
params.search_limit = static_cast<UInt32>(config.getUInt64(ldap_server_config + ".search_limit"));
}
void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::AbstractConfiguration & config)
@ -313,11 +319,26 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
}
}
UInt128 computeParamsHash(const LDAPClient::Params & params, const LDAPClient::RoleSearchParamsList * role_search_params)
{
SipHash hash;
params.updateHash(hash);
if (role_search_params)
{
for (const auto & params_instance : *role_search_params)
{
params_instance.updateHash(hash);
}
}
return hash.get128();
}
bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const BasicCredentials & credentials,
const LDAPClient::RoleSearchParamsList * role_search_params, LDAPClient::SearchResultsList * role_search_results) const
{
std::optional<LDAPClient::Params> params;
std::size_t params_hash = 0;
UInt128 params_hash = 0;
{
std::scoped_lock lock(mutex);
@ -331,14 +352,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
params->user = credentials.getUserName();
params->password = credentials.getPassword();
params->combineCoreHash(params_hash);
if (role_search_params)
{
for (const auto & params_instance : *role_search_params)
{
params_instance.combineHash(params_hash);
}
}
params_hash = computeParamsHash(*params, role_search_params);
// Check the cache, but only if the caching is enabled at all.
if (params->verification_cooldown > std::chrono::seconds{0})
@ -408,15 +422,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
new_params.user = credentials.getUserName();
new_params.password = credentials.getPassword();
std::size_t new_params_hash = 0;
new_params.combineCoreHash(new_params_hash);
if (role_search_params)
{
for (const auto & params_instance : *role_search_params)
{
params_instance.combineHash(new_params_hash);
}
}
const UInt128 new_params_hash = computeParamsHash(new_params, role_search_params);
// If the critical server params have changed while we were checking the password, we discard the current result.
if (params_hash != new_params_hash)


@ -2,10 +2,10 @@
#include <Common/Exception.h>
#include <base/scope_guard.h>
#include <Common/logger_useful.h>
#include <Common/SipHash.h>
#include <Poco/Logger.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/container_hash/hash.hpp>
#include <mutex>
#include <utility>
@ -15,6 +15,22 @@
#include <sys/time.h>
namespace
{
template <typename T, typename = std::enable_if_t<std::is_fundamental_v<std::decay_t<T>>>>
void updateHash(SipHash & hash, const T & value)
{
hash.update(value);
}
void updateHash(SipHash & hash, const std::string & value)
{
hash.update(value.size());
hash.update(value);
}
}
namespace DB
{
@ -26,30 +42,30 @@ namespace ErrorCodes
extern const int LDAP_ERROR;
}
void LDAPClient::SearchParams::combineHash(std::size_t & seed) const
void LDAPClient::SearchParams::updateHash(SipHash & hash) const
{
boost::hash_combine(seed, base_dn);
boost::hash_combine(seed, static_cast<int>(scope));
boost::hash_combine(seed, search_filter);
boost::hash_combine(seed, attribute);
::updateHash(hash, base_dn);
::updateHash(hash, static_cast<int>(scope));
::updateHash(hash, search_filter);
::updateHash(hash, attribute);
}
void LDAPClient::RoleSearchParams::combineHash(std::size_t & seed) const
void LDAPClient::RoleSearchParams::updateHash(SipHash & hash) const
{
SearchParams::combineHash(seed);
boost::hash_combine(seed, prefix);
SearchParams::updateHash(hash);
::updateHash(hash, prefix);
}
void LDAPClient::Params::combineCoreHash(std::size_t & seed) const
void LDAPClient::Params::updateHash(SipHash & hash) const
{
boost::hash_combine(seed, host);
boost::hash_combine(seed, port);
boost::hash_combine(seed, bind_dn);
boost::hash_combine(seed, user);
boost::hash_combine(seed, password);
::updateHash(hash, host);
::updateHash(hash, port);
::updateHash(hash, bind_dn);
::updateHash(hash, user);
::updateHash(hash, password);
if (user_dn_detection)
user_dn_detection->combineHash(seed);
user_dn_detection->updateHash(hash);
}
LDAPClient::LDAPClient(const Params & params_)
@ -153,13 +169,13 @@ namespace
}
void LDAPClient::diag(int rc, String text)
void LDAPClient::handleError(int result_code, String text)
{
std::scoped_lock lock(ldap_global_mutex);
if (rc != LDAP_SUCCESS)
if (result_code != LDAP_SUCCESS)
{
const char * raw_err_str = ldap_err2string(rc);
const char * raw_err_str = ldap_err2string(result_code);
if (raw_err_str && *raw_err_str != '\0')
{
if (!text.empty())
@ -214,7 +230,7 @@ bool LDAPClient::openConnection()
SCOPE_EXIT({ ldap_memfree(uri); });
diag(ldap_initialize(&handle, uri));
handleError(ldap_initialize(&handle, uri));
if (!handle)
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
}
@ -226,13 +242,13 @@ bool LDAPClient::openConnection()
case LDAPClient::Params::ProtocolVersion::V2: value = LDAP_VERSION2; break;
case LDAPClient::Params::ProtocolVersion::V3: value = LDAP_VERSION3; break;
}
diag(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
handleError(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
}
diag(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
handleError(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
#ifdef LDAP_OPT_KEEPCONN
diag(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
handleError(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
#endif
#ifdef LDAP_OPT_TIMEOUT
@ -240,7 +256,7 @@ bool LDAPClient::openConnection()
::timeval operation_timeout;
operation_timeout.tv_sec = params.operation_timeout.count();
operation_timeout.tv_usec = 0;
diag(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
handleError(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
}
#endif
@ -249,18 +265,18 @@ bool LDAPClient::openConnection()
::timeval network_timeout;
network_timeout.tv_sec = params.network_timeout.count();
network_timeout.tv_usec = 0;
diag(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
handleError(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
}
#endif
{
const int search_timeout = static_cast<int>(params.search_timeout.count());
diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
handleError(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
}
{
const int size_limit = params.search_limit;
diag(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
const int size_limit = static_cast<int>(params.search_limit);
handleError(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
}
#ifdef LDAP_OPT_X_TLS_PROTOCOL_MIN
@ -274,7 +290,7 @@ bool LDAPClient::openConnection()
case LDAPClient::Params::TLSProtocolVersion::TLS1_1: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; break;
case LDAPClient::Params::TLSProtocolVersion::TLS1_2: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; break;
}
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
}
#endif
@ -288,44 +304,44 @@ bool LDAPClient::openConnection()
case LDAPClient::Params::TLSRequireCert::TRY: value = LDAP_OPT_X_TLS_TRY; break;
case LDAPClient::Params::TLSRequireCert::DEMAND: value = LDAP_OPT_X_TLS_DEMAND; break;
}
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
}
#endif
#ifdef LDAP_OPT_X_TLS_CERTFILE
if (!params.tls_cert_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_KEYFILE
if (!params.tls_key_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CACERTFILE
if (!params.tls_ca_cert_file.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CACERTDIR
if (!params.tls_ca_cert_dir.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_CIPHER_SUITE
if (!params.tls_cipher_suite.empty())
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
#endif
#ifdef LDAP_OPT_X_TLS_NEWCTX
{
const int i_am_a_server = 0;
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
handleError(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
}
#endif
if (params.enable_tls == LDAPClient::Params::TLSEnable::YES_STARTTLS)
diag(ldap_start_tls_s(handle, nullptr, nullptr));
handleError(ldap_start_tls_s(handle, nullptr, nullptr));
final_user_name = escapeForDN(params.user);
final_bind_dn = replacePlaceholders(params.bind_dn, { {"{user_name}", final_user_name} });
@ -346,7 +362,7 @@ bool LDAPClient::openConnection()
if (rc == LDAP_INVALID_CREDENTIALS)
return false;
diag(rc);
handleError(rc);
}
// Once bound, run the user DN search query and update the default value, if asked.
@ -425,7 +441,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
}
});
diag(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
handleError(ldap_search_ext_s(handle, final_base_dn.c_str(), scope, final_search_filter.c_str(), attrs, 0, nullptr, nullptr, &timeout, params.search_limit, &msgs));
for (
auto * msg = ldap_first_message(handle, msgs);
@ -452,7 +468,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
::berval bv;
diag(ldap_get_dn_ber(handle, msg, &ber, &bv));
handleError(ldap_get_dn_ber(handle, msg, &ber, &bv));
if (bv.bv_val && bv.bv_len > 0)
result.emplace(bv.bv_val, bv.bv_len);
@ -504,7 +520,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
case LDAP_RES_SEARCH_REFERENCE:
{
char ** referrals = nullptr;
diag(ldap_parse_reference(handle, msg, &referrals, nullptr, 0));
handleError(ldap_parse_reference(handle, msg, &referrals, nullptr, 0));
if (referrals)
{
@ -528,7 +544,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
char * matched_msg = nullptr;
char * error_msg = nullptr;
diag(ldap_parse_result(handle, msg, &rc, &matched_msg, &error_msg, nullptr, nullptr, 0));
handleError(ldap_parse_result(handle, msg, &rc, &matched_msg, &error_msg, nullptr, nullptr, 0));
if (rc != LDAP_SUCCESS)
{
@ -610,7 +626,7 @@ bool LDAPSimpleAuthClient::authenticate(const RoleSearchParamsList * role_search
#else // USE_LDAP
void LDAPClient::diag(const int, String)
void LDAPClient::handleError(const int, String)
{
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
}


@ -16,6 +16,7 @@
#include <set>
#include <vector>
class SipHash;
namespace DB
{
@ -38,7 +39,7 @@ public:
String search_filter;
String attribute = "cn";
void combineHash(std::size_t & seed) const;
void updateHash(SipHash & hash) const;
};
struct RoleSearchParams
@ -46,7 +47,7 @@ public:
{
String prefix;
void combineHash(std::size_t & seed) const;
void updateHash(SipHash & hash) const;
};
using RoleSearchParamsList = std::vector<RoleSearchParams>;
@ -95,7 +96,7 @@ public:
ProtocolVersion protocol_version = ProtocolVersion::V3;
String host;
std::uint16_t port = 636;
UInt16 port = 636;
TLSEnable enable_tls = TLSEnable::YES;
TLSProtocolVersion tls_minimum_protocol_version = TLSProtocolVersion::TLS1_2;
@ -119,9 +120,9 @@ public:
std::chrono::seconds operation_timeout{40};
std::chrono::seconds network_timeout{30};
std::chrono::seconds search_timeout{20};
std::uint32_t search_limit = 100;
UInt32 search_limit = 256; /// An arbitrary number, no particular motivation for this value.
void combineCoreHash(std::size_t & seed) const;
void updateHash(SipHash & hash) const;
};
explicit LDAPClient(const Params & params_);
@ -133,7 +134,7 @@ public:
LDAPClient & operator= (LDAPClient &&) = delete;
protected:
MAYBE_NORETURN void diag(int rc, String text = "");
MAYBE_NORETURN void handleError(int result_code, String text = "");
MAYBE_NORETURN bool openConnection();
void closeConnection() noexcept;
SearchResults search(const SearchParams & search_params);


@ -228,6 +228,12 @@ namespace
user->access.revokeGrantOption(AccessType::ALL);
}
bool show_named_collections = config.getBool(user_config + ".show_named_collections", false);
if (!show_named_collections)
{
user->access.revoke(AccessType::SHOW_NAMED_COLLECTIONS);
}
String default_database = config.getString(user_config + ".default_database", "");
user->default_database = default_database;


@ -13,6 +13,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int CORRUPTED_DATA;
}
@ -89,6 +90,13 @@ public:
{
this->data(place).result.read(buf, *serialization_res, arena);
this->data(place).value.read(buf, *serialization_val, arena);
if (unlikely(this->data(place).value.has() != this->data(place).result.has()))
throw Exception(
ErrorCodes::CORRUPTED_DATA,
"Invalid state of the aggregate function {}: has_value ({}) != has_result ({})",
getName(),
this->data(place).value.has(),
this->data(place).result.has());
}
bool allocatesMemoryInArena() const override


@ -30,6 +30,7 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NOT_IMPLEMENTED;
extern const int TOO_LARGE_STRING_SIZE;
extern const int LOGICAL_ERROR;
}
/** Aggregate functions that store one of passed values.
@ -448,6 +449,34 @@ public:
};
struct Compatibility
{
/// Old versions used to store terminating null-character in SingleValueDataString.
/// Then -WithTerminatingZero methods were removed from IColumn interface,
/// because these methods are quite dangerous and easy to misuse. It introduced incompatibility.
/// See https://github.com/ClickHouse/ClickHouse/pull/41431 and https://github.com/ClickHouse/ClickHouse/issues/42916
/// Here we keep these functions for compatibility.
/// It's safe because there's no way unsanitized user input (without \0 at the end) can reach these functions.
static StringRef getDataAtWithTerminatingZero(const ColumnString & column, size_t n)
{
auto res = column.getDataAt(n);
/// ColumnString always reserves an extra byte for the null-character after the string.
/// But getDataAt returns a StringRef without the null-character. Let's add it.
chassert(res.data[res.size] == '\0');
++res.size;
return res;
}
static void insertDataWithTerminatingZero(ColumnString & column, const char * pos, size_t length)
{
/// The string already has a terminating null-character.
/// But insertData will add another one unconditionally. Trim the existing null-character to avoid duplication.
chassert(0 < length);
chassert(pos[length - 1] == '\0');
column.insertData(pos, length - 1);
}
};
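A minimal usage sketch (not part of the patch; it assumes it lives in the same translation unit as Compatibility, so ColumnString, StringRef and chassert are available) showing the invariant these helpers preserve: the value read out of a ColumnString keeps its trailing '\0', and re-inserting it does not duplicate that terminator.

static void compatibilityRoundTripExample(ColumnString & column)
{
    /// Assume the column already contains at least one row.
    StringRef value = Compatibility::getDataAtWithTerminatingZero(column, 0);
    chassert(value.size > 0 && value.data[value.size - 1] == '\0');

    /// Copy the bytes out first: inserting into the same column may reallocate its buffer.
    std::string copy(value.data, value.size);
    Compatibility::insertDataWithTerminatingZero(column, copy.data(), copy.size());

    /// The stored payload of the new row equals the payload of the original row.
    chassert(column.getDataAt(column.size() - 1) == column.getDataAt(0));
}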
/** For strings. Short strings are stored in the object itself, and long strings are allocated separately.
* NOTE It could also be suitable for arrays of numbers.
@ -457,13 +486,15 @@ struct SingleValueDataString //-V730
private:
using Self = SingleValueDataString;
Int32 size = -1; /// -1 indicates that there is no value.
Int32 capacity = 0; /// power of two or zero
/// 0 size indicates that there is no value. An empty string must have a terminating '\0' and, therefore, the size of an empty string is 1
UInt32 size = 0;
UInt32 capacity = 0; /// power of two or zero
char * large_data;
public:
static constexpr Int32 AUTOMATIC_STORAGE_SIZE = 64;
static constexpr Int32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
static constexpr UInt32 AUTOMATIC_STORAGE_SIZE = 64;
static constexpr UInt32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
static constexpr UInt32 MAX_STRING_SIZE = std::numeric_limits<Int32>::max();
private:
char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero.
@ -474,12 +505,22 @@ public:
bool has() const
{
return size >= 0;
return size;
}
private:
char * getDataMutable()
{
return size <= MAX_SMALL_STRING_SIZE ? small_data : large_data;
}
const char * getData() const
{
return size <= MAX_SMALL_STRING_SIZE ? small_data : large_data;
const char * data_ptr = size <= MAX_SMALL_STRING_SIZE ? small_data : large_data;
/// It must always be terminated with null-character
chassert(0 < size);
chassert(data_ptr[size - 1] == '\0');
return data_ptr;
}
StringRef getStringRef() const
@ -487,65 +528,105 @@ public:
return StringRef(getData(), size);
}
public:
void insertResultInto(IColumn & to) const
{
if (has())
assert_cast<ColumnString &>(to).insertData(getData(), size);
Compatibility::insertDataWithTerminatingZero(assert_cast<ColumnString &>(to), getData(), size);
else
assert_cast<ColumnString &>(to).insertDefault();
}
void write(WriteBuffer & buf, const ISerialization & /*serialization*/) const
{
writeBinary(size, buf);
if (unlikely(MAX_STRING_SIZE < size))
throw Exception(ErrorCodes::LOGICAL_ERROR, "String size is too big ({}), it's a bug", size);
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
Int32 size_to_write = size ? size : -1;
writeBinary(size_to_write, buf);
if (has())
buf.write(getData(), size);
}
void allocateLargeDataIfNeeded(UInt32 size_to_reserve, Arena * arena)
{
if (capacity < size_to_reserve)
{
if (unlikely(MAX_STRING_SIZE < size_to_reserve))
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", size_to_reserve);
size_t rounded_capacity = roundUpToPowerOfTwoOrZero(size_to_reserve);
chassert(rounded_capacity <= MAX_STRING_SIZE + 1); /// rounded_capacity <= 2^31
capacity = static_cast<UInt32>(rounded_capacity);
/// Don't free large_data here.
large_data = arena->alloc(capacity);
}
}
void read(ReadBuffer & buf, const ISerialization & /*serialization*/, Arena * arena)
{
Int32 rhs_size;
readBinary(rhs_size, buf);
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
Int32 rhs_size_signed;
readBinary(rhs_size_signed, buf);
if (rhs_size >= 0)
if (rhs_size_signed < 0)
{
if (rhs_size <= MAX_SMALL_STRING_SIZE)
{
/// Don't free large_data here.
size = rhs_size;
if (size > 0)
buf.readStrict(small_data, size);
}
else
{
if (capacity < rhs_size)
{
capacity = static_cast<Int32>(roundUpToPowerOfTwoOrZero(rhs_size));
/// It might happen if the size was too big and the rounded value does not fit a size_t
if (unlikely(capacity < rhs_size))
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", rhs_size);
/// Don't free large_data here.
large_data = arena->alloc(capacity);
}
size = rhs_size;
buf.readStrict(large_data, size);
}
/// Don't free large_data here.
size = 0;
return;
}
else
UInt32 rhs_size = rhs_size_signed;
if (rhs_size <= MAX_SMALL_STRING_SIZE)
{
/// Don't free large_data here.
size = rhs_size;
buf.readStrict(small_data, size);
}
else
{
/// Reserve one byte more for null-character
allocateLargeDataIfNeeded(rhs_size + 1, arena);
size = rhs_size;
buf.readStrict(large_data, size);
}
/// Check if the string we read is null-terminated (getDataMutable does not have the assertion)
if (0 < size && getDataMutable()[size - 1] == '\0')
return;
/// It's not null-terminated, but it must be (for historical reasons). There are two variants:
/// - The value was serialized by one of the incompatible versions of ClickHouse. We had some range of versions
/// that used to serialize SingleValueDataString without terminating '\0'. Let's just append it.
/// - An attacker sent crafted data. Sanitize it and append '\0'.
/// In all other cases the string must be already null-terminated.
/// NOTE We cannot add '\0' unconditionally, because it will be duplicated.
/// NOTE It's possible that a string that actually ends with '\0' was written by one of the incompatible versions.
/// Unfortunately, we cannot distinguish it from a normal string written by a normal version.
/// So such strings will be trimmed.
if (size == MAX_SMALL_STRING_SIZE)
{
/// Special case: We have to move value to large_data
allocateLargeDataIfNeeded(size + 1, arena);
memcpy(large_data, small_data, size);
}
/// We have enough space to append
++size;
getDataMutable()[size - 1] = '\0';
}
/// Assuming to.has()
void changeImpl(StringRef value, Arena * arena)
{
Int32 value_size = static_cast<Int32>(value.size);
if (unlikely(MAX_STRING_SIZE < value.size))
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", value.size);
UInt32 value_size = static_cast<UInt32>(value.size);
if (value_size <= MAX_SMALL_STRING_SIZE)
{
@ -557,13 +638,7 @@ public:
}
else
{
if (capacity < value_size)
{
/// Don't free large_data here.
capacity = static_cast<Int32>(roundUpToPowerOfTwoOrZero(value_size));
large_data = arena->alloc(capacity);
}
allocateLargeDataIfNeeded(value_size, arena);
size = value_size;
memcpy(large_data, value.data, size);
}
@ -571,7 +646,7 @@ public:
void change(const IColumn & column, size_t row_num, Arena * arena)
{
changeImpl(assert_cast<const ColumnString &>(column).getDataAt(row_num), arena);
changeImpl(Compatibility::getDataAtWithTerminatingZero(assert_cast<const ColumnString &>(column), row_num), arena);
}
void change(const Self & to, Arena * arena)
@ -620,7 +695,7 @@ public:
bool changeIfLess(const IColumn & column, size_t row_num, Arena * arena)
{
if (!has() || assert_cast<const ColumnString &>(column).getDataAt(row_num) < getStringRef())
if (!has() || Compatibility::getDataAtWithTerminatingZero(assert_cast<const ColumnString &>(column), row_num) < getStringRef())
{
change(column, row_num, arena);
return true;
@ -642,7 +717,7 @@ public:
bool changeIfGreater(const IColumn & column, size_t row_num, Arena * arena)
{
if (!has() || assert_cast<const ColumnString &>(column).getDataAt(row_num) > getStringRef())
if (!has() || Compatibility::getDataAtWithTerminatingZero(assert_cast<const ColumnString &>(column), row_num) > getStringRef())
{
change(column, row_num, arena);
return true;
@ -669,7 +744,7 @@ public:
bool isEqualTo(const IColumn & column, size_t row_num) const
{
return has() && assert_cast<const ColumnString &>(column).getDataAt(row_num) == getStringRef();
return has() && Compatibility::getDataAtWithTerminatingZero(assert_cast<const ColumnString &>(column), row_num) == getStringRef();
}
static bool allocatesMemoryInArena()

View File

@ -155,7 +155,7 @@ public:
"Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
getName(), value_type->getName()};
WhichDataType value_type_to_check(value_type);
WhichDataType value_type_to_check(value_type_without_nullable);
/// Do not promote decimal because of implementation issues in this function's design.
/// Currently we cannot get the result column type in the case of decimal, so we cannot get the decimal scale.

src/Analyzer/HashUtils.h (new file, 60 lines)
View File

@ -0,0 +1,60 @@
#pragma once
#include <Analyzer/IQueryTreeNode.h>
namespace DB
{
/** This structure holds a query tree node pointer and its hash. It can be used as a hash map key to avoid unnecessary hash
* recalculations.
*
* Example of usage:
* std::unordered_map<QueryTreeNodeConstRawPtrWithHash, std::string> map;
*/
template <typename QueryTreeNodePtrType>
struct QueryTreeNodeWithHash
{
QueryTreeNodeWithHash(QueryTreeNodePtrType node_) /// NOLINT
: node(std::move(node_))
, hash(node->getTreeHash().first)
{}
QueryTreeNodePtrType node = nullptr;
size_t hash = 0;
};
template <typename T>
inline bool operator==(const QueryTreeNodeWithHash<T> & lhs, const QueryTreeNodeWithHash<T> & rhs)
{
return lhs.hash == rhs.hash && lhs.node->isEqual(*rhs.node);
}
template <typename T>
inline bool operator!=(const QueryTreeNodeWithHash<T> & lhs, const QueryTreeNodeWithHash<T> & rhs)
{
return !(lhs == rhs);
}
using QueryTreeNodePtrWithHash = QueryTreeNodeWithHash<QueryTreeNodePtr>;
using QueryTreeNodeRawPtrWithHash = QueryTreeNodeWithHash<IQueryTreeNode *>;
using QueryTreeNodeConstRawPtrWithHash = QueryTreeNodeWithHash<const IQueryTreeNode *>;
using QueryTreeNodePtrWithHashSet = std::unordered_set<QueryTreeNodePtrWithHash>;
using QueryTreeNodeConstRawPtrWithHashSet = std::unordered_set<QueryTreeNodeConstRawPtrWithHash>;
template <typename Value>
using QueryTreeNodePtrWithHashMap = std::unordered_map<QueryTreeNodePtrWithHash, Value>;
template <typename Value>
using QueryTreeNodeConstRawPtrWithHashMap = std::unordered_map<QueryTreeNodeConstRawPtrWithHash, Value>;
}
template <typename T>
struct std::hash<DB::QueryTreeNodeWithHash<T>>
{
size_t operator()(const DB::QueryTreeNodeWithHash<T> & node_with_hash) const
{
return node_with_hash.hash;
}
};
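A hypothetical usage sketch (not from the patch; the function name countNodeOccurrences is illustrative): because the wrapper caches the tree hash at construction, map operations hash each node once and only fall back to IQueryTreeNode::isEqual to resolve hash collisions.

#include <Analyzer/HashUtils.h>
#include <Analyzer/IQueryTreeNode.h>

namespace DB
{

void countNodeOccurrences(const QueryTreeNodes & nodes)
{
    QueryTreeNodePtrWithHashMap<size_t> occurrences;
    for (const auto & node : nodes)
        ++occurrences[node]; /// implicit conversion to QueryTreeNodePtrWithHash computes the hash once per insertion
}

}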

View File

@ -8,6 +8,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/HashUtils.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
@ -48,43 +49,24 @@ public:
/// Do not apply for `count()` without arguments or `count(*)`; only `count(x)` is supported.
return;
mapping[QueryTreeNodeWithHash(argument_nodes[0])].push_back(&node);
argument_to_functions_mapping[argument_nodes[0]].push_back(&node);
}
struct QueryTreeNodeWithHash
{
const QueryTreeNodePtr & node;
IQueryTreeNode::Hash hash;
explicit QueryTreeNodeWithHash(const QueryTreeNodePtr & node_)
: node(node_)
, hash(node->getTreeHash())
{}
bool operator==(const QueryTreeNodeWithHash & rhs) const
{
return hash == rhs.hash && node->isEqual(*rhs.node);
}
struct Hash
{
size_t operator() (const QueryTreeNodeWithHash & key) const { return key.hash.first ^ key.hash.second; }
};
};
/// argument -> list of sum/count/avg functions with this argument
std::unordered_map<QueryTreeNodeWithHash, std::vector<QueryTreeNodePtr *>, QueryTreeNodeWithHash::Hash> mapping;
QueryTreeNodePtrWithHashMap<std::vector<QueryTreeNodePtr *>> argument_to_functions_mapping;
private:
std::unordered_set<String> names_to_collect;
};
QueryTreeNodePtr createResolvedFunction(ContextPtr context, const String & name, DataTypePtr result_type, QueryTreeNodes arguments)
QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String & name, const DataTypePtr & result_type, QueryTreeNodes arguments)
{
auto function_node = std::make_shared<FunctionNode>(name);
auto function = FunctionFactory::instance().get(name, context);
function_node->resolveAsFunction(std::move(function), result_type);
function_node->getArguments().getNodes() = std::move(arguments);
return function_node;
}
@ -94,21 +76,20 @@ FunctionNodePtr createResolvedAggregateFunction(const String & name, const Query
AggregateFunctionProperties properties;
auto aggregate_function = AggregateFunctionFactory::instance().get(name, {argument->getResultType()}, parameters, properties);
function_node->resolveAsAggregateFunction(aggregate_function, aggregate_function->getReturnType());
function_node->getArguments().getNodes() = { argument };
function_node->getArgumentsNode() = std::make_shared<ListNode>(QueryTreeNodes{argument});
return function_node;
}
QueryTreeNodePtr createTupleElementFunction(ContextPtr context, DataTypePtr result_type, QueryTreeNodePtr argument, UInt64 index)
QueryTreeNodePtr createTupleElementFunction(const ContextPtr & context, const DataTypePtr & result_type, QueryTreeNodePtr argument, UInt64 index)
{
return createResolvedFunction(context, "tupleElement", result_type, {argument, std::make_shared<ConstantNode>(index)});
return createResolvedFunction(context, "tupleElement", result_type, {std::move(argument), std::make_shared<ConstantNode>(index)});
}
QueryTreeNodePtr createArrayElementFunction(ContextPtr context, DataTypePtr result_type, QueryTreeNodePtr argument, UInt64 index)
QueryTreeNodePtr createArrayElementFunction(const ContextPtr & context, const DataTypePtr & result_type, QueryTreeNodePtr argument, UInt64 index)
{
return createResolvedFunction(context, "arrayElement", result_type, {argument, std::make_shared<ConstantNode>(index)});
return createResolvedFunction(context, "arrayElement", result_type, {std::move(argument), std::make_shared<ConstantNode>(index)});
}
void replaceWithSumCount(QueryTreeNodePtr & node, const FunctionNodePtr & sum_count_node, ContextPtr context)
@ -151,6 +132,7 @@ FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> n
{
Array parameters;
parameters.reserve(nodes.size());
for (const auto * node : nodes)
{
const FunctionNode & function_node = (*node)->as<const FunctionNode &>();
@ -172,6 +154,7 @@ FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> n
parameters.push_back(constant_value->getValue());
}
return createResolvedAggregateFunction("quantiles", argument, parameters);
}
@ -181,7 +164,7 @@ void tryFuseSumCountAvg(QueryTreeNodePtr query_tree_node, ContextPtr context)
FuseFunctionsVisitor visitor({"sum", "count", "avg"});
visitor.visit(query_tree_node);
for (auto & [argument, nodes] : visitor.mapping)
for (auto & [argument, nodes] : visitor.argument_to_functions_mapping)
{
if (nodes.size() < 2)
continue;
@ -199,24 +182,22 @@ void tryFuseQuantiles(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
FuseFunctionsVisitor visitor_quantile({"quantile"});
visitor_quantile.visit(query_tree_node);
for (auto & [argument, nodes] : visitor_quantile.mapping)
for (auto & [argument, nodes] : visitor_quantile.argument_to_functions_mapping)
{
if (nodes.size() < 2)
size_t nodes_size = nodes.size();
if (nodes_size < 2)
continue;
auto quantiles_node = createFusedQuantilesNode(nodes, argument.node);
auto result_array_type = std::dynamic_pointer_cast<const DataTypeArray>(quantiles_node->getResultType());
if (!result_array_type)
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Unexpected return type '{}' of function '{}', should be array",
quantiles_node->getResultType(), quantiles_node->getFunctionName());
}
for (size_t i = 0; i < nodes.size(); ++i)
{
for (size_t i = 0; i < nodes_size; ++i)
*nodes[i] = createArrayElementFunction(context, result_array_type->getNestedType(), quantiles_node, i + 1);
}
}
}
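A hedged illustration (not part of the patch) of what the quantile fusion above produces, expressed as the roughly equivalent SQL; on the query-tree level both arrayElement calls share a single quantiles node, so the underlying data is aggregated once instead of once per quantile call.

/// SELECT quantile(0.25)(x), quantile(0.75)(x) FROM t
/// is rewritten into (roughly)
/// SELECT arrayElement(quantiles(0.25, 0.75)(x), 1), arrayElement(quantiles(0.25, 0.75)(x), 2) FROM t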

View File

@ -3,6 +3,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/SortNode.h>
#include <Analyzer/HashUtils.h>
namespace DB
{
@ -10,35 +11,6 @@ namespace DB
namespace
{
struct QueryTreeNodeWithHash
{
explicit QueryTreeNodeWithHash(const IQueryTreeNode * node_)
: node(node_)
, hash(node->getTreeHash().first)
{}
const IQueryTreeNode * node = nullptr;
size_t hash = 0;
};
struct QueryTreeNodeWithHashHash
{
size_t operator()(const QueryTreeNodeWithHash & node_with_hash) const
{
return node_with_hash.hash;
}
};
struct QueryTreeNodeWithHashEqualTo
{
bool operator()(const QueryTreeNodeWithHash & lhs_node, const QueryTreeNodeWithHash & rhs_node) const
{
return lhs_node.hash == rhs_node.hash && lhs_node.node->isEqual(*rhs_node.node);
}
};
using QueryTreeNodeWithHashSet = std::unordered_set<QueryTreeNodeWithHash, QueryTreeNodeWithHashHash, QueryTreeNodeWithHashEqualTo>;
class OrderByLimitByDuplicateEliminationVisitor : public InDepthQueryTreeVisitor<OrderByLimitByDuplicateEliminationVisitor>
{
public:
@ -93,7 +65,7 @@ public:
}
private:
QueryTreeNodeWithHashSet unique_expressions_nodes_set;
QueryTreeNodeConstRawPtrWithHashSet unique_expressions_nodes_set;
};
}

View File

@ -67,6 +67,8 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Common/checkStackSize.h>
namespace DB
{
@ -517,7 +519,7 @@ public:
private:
QueryTreeNodes expressions;
std::unordered_map<std::string, std::vector<QueryTreeNodePtr>> alias_name_to_expressions;
std::unordered_map<std::string, QueryTreeNodes> alias_name_to_expressions;
};
/** Projection names is name of query tree node that is used in projection part of query node.
@ -1100,6 +1102,10 @@ private:
static void validateJoinTableExpressionWithoutAlias(const QueryTreeNodePtr & join_node, const QueryTreeNodePtr & table_expression_node, IdentifierResolveScope & scope);
static void expandGroupByAll(QueryNode & query_tree_node_typed);
static std::pair<bool, UInt64> recursivelyCollectMaxOrdinaryExpressions(QueryTreeNodePtr & node, QueryTreeNodes & into);
/// Resolve identifier functions
static QueryTreeNodePtr tryResolveTableIdentifierFromDatabaseCatalog(const Identifier & table_identifier, ContextPtr context);
@ -1929,6 +1935,68 @@ void QueryAnalyzer::validateJoinTableExpressionWithoutAlias(const QueryTreeNodeP
scope.scope_node->formatASTForErrorMessage());
}
std::pair<bool, UInt64> QueryAnalyzer::recursivelyCollectMaxOrdinaryExpressions(QueryTreeNodePtr & node, QueryTreeNodes & into)
{
checkStackSize();
if (node->as<ColumnNode>())
{
into.push_back(node);
return {false, 1};
}
auto * function = node->as<FunctionNode>();
if (!function)
return {false, 0};
if (function->isAggregateFunction())
return {true, 0};
UInt64 pushed_children = 0;
bool has_aggregate = false;
for (auto & child : function->getArguments().getNodes())
{
auto [child_has_aggregate, child_pushed_children] = recursivelyCollectMaxOrdinaryExpressions(child, into);
has_aggregate |= child_has_aggregate;
pushed_children += child_pushed_children;
}
/// The current function is not an aggregate function and there is no aggregate function among its arguments,
/// so use the current function to replace its arguments
if (!has_aggregate)
{
for (UInt64 i = 0; i < pushed_children; i++)
into.pop_back();
into.push_back(node);
pushed_children = 1;
}
return {has_aggregate, pushed_children};
}
/** Expand GROUP BY ALL by extracting all the SELECT-ed expressions that are not aggregate functions.
*
* As a special case, if a function has both aggregate functions and other fields as its arguments,
* the `GROUP BY` keys will contain the maximal non-aggregate fields we can extract from it.
*
* Example:
* SELECT substring(a, 4, 2), substring(substring(a, 1, 2), 1, count(b)) FROM t GROUP BY ALL
* will expand as
* SELECT substring(a, 4, 2), substring(substring(a, 1, 2), 1, count(b)) FROM t GROUP BY substring(a, 4, 2), substring(a, 1, 2)
*/
void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
{
auto & group_by_nodes = query_tree_node_typed.getGroupBy().getNodes();
auto & projection_list = query_tree_node_typed.getProjection();
for (auto & node : projection_list.getNodes())
recursivelyCollectMaxOrdinaryExpressions(node, group_by_nodes);
}
/// Resolve identifier functions implementation
@ -2171,18 +2239,19 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromAliases(const Identifier
auto & alias_identifier_node = it->second->as<IdentifierNode &>();
auto identifier = alias_identifier_node.getIdentifier();
auto lookup_result = tryResolveIdentifier(IdentifierLookup{identifier, identifier_lookup.lookup_context}, scope, identifier_resolve_settings);
if (!lookup_result.isResolved())
if (!lookup_result.resolved_identifier)
{
std::unordered_set<Identifier> valid_identifiers;
collectScopeWithParentScopesValidIdentifiersForTypoCorrection(identifier, scope, true, false, false, valid_identifiers);
auto hints = collectIdentifierTypoHints(identifier, valid_identifiers);
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {} identifier '{}' in scope {}{}",
toStringLowercase(IdentifierLookupContext::EXPRESSION),
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown {} identifier '{}'. In scope {}{}",
toStringLowercase(identifier_lookup.lookup_context),
identifier.getFullName(),
scope.scope_node->formatASTForErrorMessage(),
getHintsErrorMessageSuffix(hints));
}
it->second = lookup_result.resolved_identifier;
/** During collection of aliases if node is identifier and has alias, we cannot say if it is
@ -2193,9 +2262,9 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromAliases(const Identifier
* If we resolved identifier node as function, we must remove identifier node alias from
* expression alias map.
*/
if (identifier_lookup.isExpressionLookup() && it->second)
if (identifier_lookup.isExpressionLookup())
scope.alias_name_to_lambda_node.erase(identifier_bind_part);
else if (identifier_lookup.isFunctionLookup() && it->second)
else if (identifier_lookup.isFunctionLookup())
scope.alias_name_to_expression_node.erase(identifier_bind_part);
scope.expressions_in_resolve_process_stack.popNode();
@ -3203,11 +3272,9 @@ QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveUnqualifiedMatcher(
if (auto * array_join_node = table_expression->as<ArrayJoinNode>())
{
size_t table_expressions_column_nodes_with_names_stack_size = table_expressions_column_nodes_with_names_stack.size();
if (table_expressions_column_nodes_with_names_stack_size < 1)
if (table_expressions_column_nodes_with_names_stack.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expected at least 1 table expressions on stack before ARRAY JOIN processing. Actual {}",
table_expressions_column_nodes_with_names_stack_size);
"Expected at least 1 table expressions on stack before ARRAY JOIN processing");
auto & table_expression_column_nodes_with_names = table_expressions_column_nodes_with_names_stack.back();
@ -5388,25 +5455,7 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
}
}
/// TODO: Special functions that can take query
/// TODO: Support qualified matchers for table function
for (auto & argument_node : table_function_node.getArguments().getNodes())
{
if (argument_node->getNodeType() == QueryTreeNodeType::MATCHER)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Matcher as table function argument is not supported {}. In scope {}",
join_tree_node->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
}
auto * function_node = argument_node->as<FunctionNode>();
if (function_node && table_function_factory.hasNameOrAlias(function_node->getFunctionName()))
continue;
resolveExpressionNode(argument_node, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
}
resolveExpressionNodeList(table_function_node.getArgumentsNode(), scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
auto table_function_ast = table_function_node.toAST();
table_function_ptr->parseArguments(table_function_ast, scope_context);
@ -6006,6 +6055,9 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
node->removeAlias();
}
if (query_node_typed.isGroupByAll())
expandGroupByAll(query_node_typed);
/** Validate aggregates
*
* 1. Check that there are no aggregate functions and GROUPING function in JOIN TREE, WHERE, PREWHERE, in another aggregate functions.

View File

@ -54,6 +54,9 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
if (is_group_by_with_totals)
buffer << ", is_group_by_with_totals: " << is_group_by_with_totals;
if (is_group_by_all)
buffer << ", is_group_by_all: " << is_group_by_all;
std::string group_by_type;
if (is_group_by_with_rollup)
group_by_type = "rollup";
@ -117,7 +120,7 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
getWhere()->dumpTreeImpl(buffer, format_state, indent + 4);
}
if (hasGroupBy())
if (!is_group_by_all && hasGroupBy())
{
buffer << '\n' << std::string(indent + 2, ' ') << "GROUP BY\n";
getGroupBy().dumpTreeImpl(buffer, format_state, indent + 4);
@ -198,7 +201,8 @@ bool QueryNode::isEqualImpl(const IQueryTreeNode & rhs) const
is_group_by_with_totals == rhs_typed.is_group_by_with_totals &&
is_group_by_with_rollup == rhs_typed.is_group_by_with_rollup &&
is_group_by_with_cube == rhs_typed.is_group_by_with_cube &&
is_group_by_with_grouping_sets == rhs_typed.is_group_by_with_grouping_sets;
is_group_by_with_grouping_sets == rhs_typed.is_group_by_with_grouping_sets &&
is_group_by_all == rhs_typed.is_group_by_all;
}
void QueryNode::updateTreeHashImpl(HashState & state) const
@ -226,6 +230,7 @@ void QueryNode::updateTreeHashImpl(HashState & state) const
state.update(is_group_by_with_rollup);
state.update(is_group_by_with_cube);
state.update(is_group_by_with_grouping_sets);
state.update(is_group_by_all);
if (constant_value)
{
@ -251,6 +256,7 @@ QueryTreeNodePtr QueryNode::cloneImpl() const
result_query_node->is_group_by_with_rollup = is_group_by_with_rollup;
result_query_node->is_group_by_with_cube = is_group_by_with_cube;
result_query_node->is_group_by_with_grouping_sets = is_group_by_with_grouping_sets;
result_query_node->is_group_by_all = is_group_by_all;
result_query_node->cte_name = cte_name;
result_query_node->projection_columns = projection_columns;
result_query_node->constant_value = constant_value;
@ -267,6 +273,7 @@ ASTPtr QueryNode::toASTImpl() const
select_query->group_by_with_rollup = is_group_by_with_rollup;
select_query->group_by_with_cube = is_group_by_with_cube;
select_query->group_by_with_grouping_sets = is_group_by_with_grouping_sets;
select_query->group_by_all = is_group_by_all;
if (hasWith())
select_query->setExpression(ASTSelectQuery::Expression::WITH, getWith().toAST());
@ -283,7 +290,7 @@ ASTPtr QueryNode::toASTImpl() const
if (getWhere())
select_query->setExpression(ASTSelectQuery::Expression::WHERE, getWhere()->toAST());
if (hasGroupBy())
if (!is_group_by_all && hasGroupBy())
select_query->setExpression(ASTSelectQuery::Expression::GROUP_BY, getGroupBy().toAST());
if (hasHaving())

View File

@ -176,6 +176,18 @@ public:
is_group_by_with_grouping_sets = is_group_by_with_grouping_sets_value;
}
/// Returns true, if query node has GROUP BY ALL modifier, false otherwise
bool isGroupByAll() const
{
return is_group_by_all;
}
/// Set query node GROUP BY ALL modifier value
void setIsGroupByAll(bool is_group_by_all_value)
{
is_group_by_all = is_group_by_all_value;
}
/// Returns true if query node WITH section is not empty, false otherwise
bool hasWith() const
{
@ -580,6 +592,7 @@ private:
bool is_group_by_with_rollup = false;
bool is_group_by_with_cube = false;
bool is_group_by_with_grouping_sets = false;
bool is_group_by_all = false;
std::string cte_name;
NamesAndTypes projection_columns;

View File

@ -215,6 +215,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
current_query_tree->setIsGroupByWithCube(select_query_typed.group_by_with_cube);
current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup);
current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets);
current_query_tree->setIsGroupByAll(select_query_typed.group_by_all);
current_query_tree->setOriginalAST(select_query);
auto select_settings = select_query_typed.settings();

View File

@ -110,12 +110,12 @@ void registerBackupEngineS3(BackupFactory & factory)
if (params.open_mode == IBackup::OpenMode::READ)
{
auto reader = std::make_shared<BackupReaderS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
auto reader = std::make_shared<BackupReaderS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
}
else
{
auto writer = std::make_shared<BackupWriterS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
auto writer = std::make_shared<BackupWriterS3>(S3::URI{s3_uri}, access_key_id, secret_access_key, params.context);
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
}
#else

View File

@ -1401,6 +1401,11 @@ try
QueryPipeline pipeline(std::move(pipe));
PullingAsyncPipelineExecutor executor(pipeline);
if (need_render_progress)
{
pipeline.setProgressCallback([this](const Progress & progress){ onProgress(progress); });
}
Block block;
while (executor.pull(block))
{
@ -1445,12 +1450,6 @@ catch (...)
void ClientBase::sendDataFromStdin(Block & sample, const ColumnsDescription & columns_description, ASTPtr parsed_query)
{
if (need_render_progress)
{
/// Add callback to track reading from fd.
std_in.setProgressCallback(global_context);
}
/// Send data read from stdin.
try
{

View File

@ -171,6 +171,11 @@ protected:
void initTtyBuffer(ProgressOption progress);
/// Should be one of the first, to be destroyed the last,
/// since other members can use them.
SharedContextHolder shared_context;
ContextMutablePtr global_context;
bool is_interactive = false; /// Use either interactive line editing interface or batch mode.
bool is_multiquery = false;
bool delayed_interactive = false;
@ -208,9 +213,6 @@ protected:
/// Settings specified via command line args
Settings cmd_settings;
SharedContextHolder shared_context;
ContextMutablePtr global_context;
/// thread status should be destructed before shared context because it relies on process list.
std::optional<ThreadStatus> thread_status;

View File

@ -524,11 +524,13 @@ void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t leng
size_t nested_offset = src_concrete.offsetAt(start);
size_t nested_length = src_concrete.getOffsets()[start + length - 1] - nested_offset;
Offsets & cur_offsets = getOffsets();
/// Reserve offsets beforehand to make this more exception-safe (in case of MEMORY_LIMIT_EXCEEDED)
cur_offsets.reserve(cur_offsets.size() + length);
getData().insertRangeFrom(src_concrete.getData(), nested_offset, nested_length);
Offsets & cur_offsets = getOffsets();
const Offsets & src_offsets = src_concrete.getOffsets();
if (start == 0 && cur_offsets.empty())
{
cur_offsets.assign(src_offsets.begin(), src_offsets.begin() + length);

View File

@ -124,6 +124,9 @@ void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t len
size_t nested_offset = src_concrete.offsetAt(start);
size_t nested_length = src_concrete.offsets[start + length - 1] - nested_offset;
/// Reserve offsets beforehand to make this more exception-safe (in case of MEMORY_LIMIT_EXCEEDED)
offsets.reserve(offsets.size() + length);
size_t old_chars_size = chars.size();
chars.resize(old_chars_size + nested_length);
memcpy(&chars[old_chars_size], &src_concrete.chars[nested_offset], nested_length);

View File

@ -5,6 +5,7 @@
#define APPLY_FOR_METRICS(M) \
M(Query, "Number of executing queries") \
M(Merge, "Number of executing background merges") \
M(Move, "Number of currently executing moves") \
M(PartMutation, "Number of mutations (ALTER DELETE/UPDATE)") \
M(ReplicatedFetch, "Number of data parts being fetched from replica") \
M(ReplicatedSend, "Number of data parts being sent to replicas") \

View File

@ -27,6 +27,14 @@ public:
/// NOTE: Adding events into distant past (further than `period`) must be avoided.
void add(double now, double count)
{
// Remove data from the initial heating stage that can be present at the beginning of a query.
// Otherwise it leads to a wrong gradual increase of the average value, making the algorithm not very reactive.
if (count != 0.0 && ++data_points < 5)
{
start = events.time;
events = ExponentiallySmoothedAverage();
}
if (now - period <= start) // precise counting mode
events = ExponentiallySmoothedAverage(events.value + count, now);
else // exponential smoothing mode
@ -51,6 +59,7 @@ public:
{
start = now;
events = ExponentiallySmoothedAverage();
data_points = 0;
}
private:
@ -58,6 +67,7 @@ private:
const double half_decay_time;
double start; // Instant in past without events before it; when measurement started or reset
ExponentiallySmoothedAverage events; // Estimated number of events in the last `period`
size_t data_points = 0;
};
}
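A small sketch (not part of the patch; the rate(now) accessor is assumed from how this class is used by ProgressIndication, and the timestamps are illustrative) of the effect of the heating-stage handling: the first few non-zero samples of a fresh meter restart the measurement window before being counted, so a short burst right after a query start does not produce a misleadingly diluted average.

#include <Common/EventRateMeter.h>

double exampleRate()
{
    DB::EventRateMeter meter(/* now = */ 0.0, /* period = */ 2.0);
    for (int i = 1; i <= 10; ++i)
        meter.add(/* now = */ i * 0.1, /* count = */ 1.0); /// early samples move `start` forward instead of diluting the average
    return meter.rate(/* now = */ 1.0); /// assumed accessor returning the estimated rate over `period`
}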

View File

@ -15,6 +15,7 @@
#include <Common/formatReadable.h>
#include <Common/filesystemHelpers.h>
#include <Common/ErrorCodes.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/LockMemoryExceptionInThread.h>
#include <filesystem>
@ -63,11 +64,18 @@ void handle_error_code([[maybe_unused]] const std::string & msg, int code, bool
ErrorCodes::increment(code, remote, msg, trace);
}
Exception::Exception(const std::string & msg, int code, bool remote_)
: Poco::Exception(msg, code)
Exception::MessageMasked::MessageMasked(const std::string & msg_)
: msg(msg_)
{
if (auto * masker = SensitiveDataMasker::getInstance())
masker->wipeSensitiveData(msg);
}
Exception::Exception(const MessageMasked & msg_masked, int code, bool remote_)
: Poco::Exception(msg_masked.msg, code)
, remote(remote_)
{
handle_error_code(msg, code, remote, getStackFramePointers());
handle_error_code(msg_masked.msg, code, remote, getStackFramePointers());
}
Exception::Exception(CreateFromPocoTag, const Poco::Exception & exc)

View File

@ -27,7 +27,19 @@ public:
using FramePointers = std::vector<void *>;
Exception() = default;
Exception(const std::string & msg, int code, bool remote_ = false);
// used to remove the sensitive information from exceptions if query_masking_rules is configured
struct MessageMasked
{
std::string msg;
MessageMasked(const std::string & msg_);
};
Exception(const MessageMasked & msg_masked, int code, bool remote_);
// delegating constructor to mask sensitive information from the message
Exception(const std::string & msg, int code, bool remote_ = false): Exception(MessageMasked(msg), code, remote_)
{}
Exception(int code, const std::string & message)
: Exception(message, code)
@ -54,12 +66,17 @@ public:
template <typename... Args>
void addMessage(fmt::format_string<Args...> format, Args &&... args)
{
extendedMessage(fmt::format(format, std::forward<Args>(args)...));
addMessage(fmt::format(format, std::forward<Args>(args)...));
}
void addMessage(const std::string& message)
{
extendedMessage(message);
addMessage(MessageMasked(message));
}
void addMessage(const MessageMasked & msg_masked)
{
extendedMessage(msg_masked.msg);
}
/// Used to distinguish local exceptions from the one that was received from remote node.
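A minimal sketch (not part of the patch; the function name, error code and message are illustrative) of what the delegating constructor buys: any Exception built from a plain string now goes through MessageMasked, so the server's query_masking_rules are applied to exception texts before they are stored or logged.

#include <Common/Exception.h>

namespace DB::ErrorCodes { extern const int BAD_ARGUMENTS; }

void exampleThrow()
{
    /// Delegates to Exception(MessageMasked("..."), code, /* remote_ = */ false),
    /// so a rule such as `password = '.*' -> password = '[hidden]'` would already be applied here.
    throw DB::Exception("Cannot authenticate: password = 'secret'", DB::ErrorCodes::BAD_ARGUMENTS);
}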

View File

@ -220,7 +220,7 @@ void MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceeded, MemoryT
Int64 limit_to_check = current_hard_limit;
#if USE_JEMALLOC
if (level == VariableContext::Global)
if (level == VariableContext::Global && allow_use_jemalloc_memory.load(std::memory_order_relaxed))
{
/// Jemalloc arenas may keep some extra memory.
/// This memory was subtracted from RSS to decrease memory drift.

View File

@ -55,6 +55,7 @@ private:
std::atomic<Int64> soft_limit {0};
std::atomic<Int64> hard_limit {0};
std::atomic<Int64> profiler_limit {0};
std::atomic_bool allow_use_jemalloc_memory {true};
static std::atomic<Int64> free_memory_in_allocator_arenas;
@ -125,6 +126,10 @@ public:
{
return soft_limit.load(std::memory_order_relaxed);
}
void setAllowUseJemallocMemory(bool value)
{
allow_use_jemalloc_memory.store(value, std::memory_order_relaxed);
}
/** Set limit if it was not set.
* Otherwise, set limit to new value, if new value is greater than previous limit.

View File

@ -90,7 +90,7 @@ private:
bool write_progress_on_update = false;
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 3'000'000'000 /*ns*/}; // average cpu utilization last 3 second
EventRateMeter cpu_usage_meter{static_cast<double>(clock_gettime_ns()), 2'000'000'000 /*ns*/}; // average CPU utilization over the last 2 seconds
HostToThreadTimesMap thread_data;
/// In case of all of the above:
/// - clickhouse-local

View File

@ -189,6 +189,13 @@ public:
finalize();
return v0 ^ v1 ^ v2 ^ v3;
}
UInt128 get128()
{
UInt128 res;
get128(res);
return res;
}
};
@ -208,9 +215,7 @@ inline UInt128 sipHash128(const char * data, const size_t size)
{
SipHash hash;
hash.update(data, size);
UInt128 res;
hash.get128(res);
return res;
return hash.get128();
}
inline UInt64 sipHash64(const char * data, const size_t size)

View File

@ -8,6 +8,7 @@
#include "hasLinuxCapability.h"
#include <base/unaligned.h>
#include <Common/logger_useful.h>
#include <cerrno>
#include <cstdio>
@ -205,6 +206,20 @@ bool checkPermissionsImpl()
{
TaskStatsInfoGetter();
}
catch (const Exception & e)
{
if (e.code() == ErrorCodes::NETLINK_ERROR)
{
/// This error happens all the time when running inside Docker - consider it ok,
/// don't create noise with this error.
LOG_DEBUG(&Poco::Logger::get(__PRETTY_FUNCTION__), "{}", getCurrentExceptionMessage(false));
}
else
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
return false;
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);

View File

@ -1,6 +1,7 @@
#pragma once
#include <base/types.h>
#include <base/getThreadId.h>
#include <Common/ProfileEvents.h>
#include <sys/time.h>
#include <sys/resource.h>
@ -47,6 +48,8 @@ struct RUsageCounters
UInt64 soft_page_faults = 0;
UInt64 hard_page_faults = 0;
UInt64 thread_id = 0;
RUsageCounters() = default;
RUsageCounters(const ::rusage & rusage_, UInt64 real_time_)
{
@ -61,6 +64,8 @@ struct RUsageCounters
soft_page_faults = static_cast<UInt64>(rusage.ru_minflt);
hard_page_faults = static_cast<UInt64>(rusage.ru_majflt);
thread_id = getThreadId();
}
static RUsageCounters current()
@ -78,6 +83,12 @@ struct RUsageCounters
static void incrementProfileEvents(const RUsageCounters & prev, const RUsageCounters & curr, ProfileEvents::Counters & profile_events)
{
chassert(prev.thread_id == curr.thread_id);
/// LONG_MAX is ~106751 days
chassert(curr.real_time - prev.real_time < LONG_MAX);
chassert(curr.user_time - prev.user_time < LONG_MAX);
chassert(curr.sys_time - prev.sys_time < LONG_MAX);
profile_events.increment(ProfileEvents::RealTimeMicroseconds, (curr.real_time - prev.real_time) / 1000U);
profile_events.increment(ProfileEvents::UserTimeMicroseconds, (curr.user_time - prev.user_time) / 1000U);
profile_events.increment(ProfileEvents::SystemTimeMicroseconds, (curr.sys_time - prev.sys_time) / 1000U);

View File

@ -179,8 +179,8 @@ protected:
/// Is used to send logs from logs_queue to client in case of fatal errors.
std::function<void()> fatal_error_callback;
/// It is used to avoid enabling the query profiler when you have multiple ThreadStatus in the same thread
bool query_profiler_enabled = true;
/// See setInternalThread()
bool internal_thread = false;
/// Requires access to query_id.
friend class MemoryTrackerThreadSwitcher;
@ -225,11 +225,21 @@ public:
return global_context.lock();
}
void disableProfiling()
{
assert(!query_profiler_real && !query_profiler_cpu);
query_profiler_enabled = false;
}
/// "Internal" ThreadStatus is used for materialized views for separate
/// tracking into system.query_views_log
///
/// You can have multiple internal threads, but only one non-internal with
/// the same thread_id.
///
/// "Internal" thread:
/// - cannot have query profiler
/// since the running (main query) thread should already have one
/// - should not try to obtain the latest counters on detach,
///   because detaching of such threads is done from a different
///   thread_id, and some counters are not available (e.g. getrusage()),
///   but they are nevertheless accounted correctly in the main ThreadStatus of the
///   query.
void setInternalThread();
/// Starts new query and create new thread group for it, current thread becomes master thread of the query
void initializeQuery();

View File

@ -1,6 +1,6 @@
#pragma once
#include <string>
#include <Core/Types.h>
/// Maps 0..15 to 0..9A..F or 0..9a..f correspondingly.
@ -50,17 +50,32 @@ inline void writeBinByte(UInt8 byte, void * out)
template <typename TUInt>
inline void writeHexUIntImpl(TUInt uint_, char * out, const char * const table)
{
union
if constexpr (is_integer<TUInt>)
{
TUInt value;
UInt8 uint8[sizeof(TUInt)];
};
/// For integer types, use an endian-independent way of conversion
TUInt value = uint_;
value = uint_;
for (size_t i = 0; i < sizeof(TUInt); ++i)
{
memcpy(out + (sizeof(TUInt) - 1 - i) * 2, &table[static_cast<size_t>(value % 256) * 2], 2);
value /= 256;
}
}
else
{
/// For non-integer types, access memory directly for conversion to keep backward compatibility
union
{
TUInt value;
UInt8 uint8[sizeof(TUInt)];
};
/// Use little endian
for (size_t i = 0; i < sizeof(TUInt); ++i)
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
value = uint_;
/// Use little endian
for (size_t i = 0; i < sizeof(TUInt); ++i)
memcpy(out + i * 2, &table[static_cast<size_t>(uint8[sizeof(TUInt) - 1 - i]) * 2], 2);
}
}
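A hedged example (not part of the patch; writeHexUIntUppercase is assumed to be the existing wrapper in this header that forwards to writeHexUIntImpl with the uppercase table): the integer branch emits the most significant byte first via repeated division, so the textual result is the same on little- and big-endian hosts.

inline void hexExample()
{
    char out[8];
    writeHexUIntUppercase(UInt32{0x1A2B3C4D}, out); /// `out` now holds "1A2B3C4D" (no terminating zero) on any platform
}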
template <typename TUInt>

View File

@ -36,7 +36,7 @@ void CoordinationSettings::loadFromConfig(const String & config_elem, const Poco
}
const String KeeperConfigurationAndSettings::DEFAULT_FOUR_LETTER_WORD_CMD = "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif";
const String KeeperConfigurationAndSettings::DEFAULT_FOUR_LETTER_WORD_CMD = "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro,rcvr,apiv,csnp,lgif,rqld";
KeeperConfigurationAndSettings::KeeperConfigurationAndSettings()
: server_id(NOT_EXIST)

View File

@ -142,6 +142,9 @@ void FourLetterCommandFactory::registerCommands(KeeperDispatcher & keeper_dispat
FourLetterCommandPtr log_info_command = std::make_shared<LogInfoCommand>(keeper_dispatcher);
factory.registerCommand(log_info_command);
FourLetterCommandPtr request_leader_command = std::make_shared<RequestLeaderCommand>(keeper_dispatcher);
factory.registerCommand(request_leader_command);
factory.initializeAllowList(keeper_dispatcher);
factory.setInitialize(true);
}
@ -507,4 +510,9 @@ String LogInfoCommand::run()
return ret.str();
}
String RequestLeaderCommand::run()
{
return keeper_dispatcher.requestLeader() ? "Sent leadership request to leader." : "Failed to send leadership request to leader.";
}
}

View File

@ -364,4 +364,17 @@ struct LogInfoCommand : public IFourLetterCommand
~LogInfoCommand() override = default;
};
/// Request to be leader.
struct RequestLeaderCommand : public IFourLetterCommand
{
explicit RequestLeaderCommand(KeeperDispatcher & keeper_dispatcher_)
: IFourLetterCommand(keeper_dispatcher_)
{
}
String name() override { return "rqld"; }
String run() override;
~RequestLeaderCommand() override = default;
};
}

View File

@ -215,6 +215,12 @@ public:
{
return server->getKeeperLogInfo();
}
/// Request to be leader.
bool requestLeader()
{
return server->requestLeader();
}
};
}

View File

@ -932,4 +932,9 @@ KeeperLogInfo KeeperServer::getKeeperLogInfo()
return log_info;
}
bool KeeperServer::requestLeader()
{
return isLeader() || raft_instance->request_leadership();
}
}

View File

@ -135,6 +135,8 @@ public:
uint64_t createSnapshot();
KeeperLogInfo getKeeperLogInfo();
bool requestLeader();
};
}

View File

@ -65,7 +65,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
auto auth_settings = S3::AuthSettings::loadFromConfig(config_prefix, config);
auto endpoint = config.getString(config_prefix + ".endpoint");
auto new_uri = S3::URI{Poco::URI(endpoint)};
auto new_uri = S3::URI{endpoint};
{
std::lock_guard client_lock{snapshot_s3_client_mutex};

View File

@ -667,9 +667,15 @@ Names Block::getDataTypeNames() const
}
std::unordered_map<String, size_t> Block::getNamesToIndexesMap() const
Block::NameMap Block::getNamesToIndexesMap() const
{
return index_by_name;
NameMap res;
res.reserve(index_by_name.size());
for (const auto & [name, index] : index_by_name)
res[name] = index;
return res;
}
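A hypothetical usage sketch (not from the patch; the function name printColumnPosition is illustrative): the returned NameMap is a HashMap keyed by StringRef, so a lookup does not allocate a std::string and find() yields a nullable cell pointer rather than an iterator.

#include <Core/Block.h>
#include <iostream>
#include <string>

void printColumnPosition(const DB::Block & block, const std::string & column_name)
{
    /// The StringRef keys point into the block's own name storage, so keep `block` alive while using the map.
    auto name_map = block.getNamesToIndexesMap();
    if (auto it = name_map.find(StringRef(column_name)))
        std::cout << column_name << " is at position " << it->getMapped() << '\n';
}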

View File

@ -5,6 +5,8 @@
#include <Core/ColumnsWithTypeAndName.h>
#include <Core/NamesAndTypes.h>
#include <Common/HashTable/HashMap.h>
#include <initializer_list>
#include <list>
#include <map>
@ -93,7 +95,10 @@ public:
Names getNames() const;
DataTypes getDataTypes() const;
Names getDataTypeNames() const;
std::unordered_map<String, size_t> getNamesToIndexesMap() const;
/// Hash table mapping `column name -> position in the block`.
using NameMap = HashMap<StringRef, size_t, StringRefHash>;
NameMap getNamesToIndexesMap() const;
Serializations getSerializations() const;

View File

@ -851,6 +851,9 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, output_format_sql_insert_include_column_names, true, "Include column names in INSERT query", 0) \
M(Bool, output_format_sql_insert_use_replace, false, "Use REPLACE statement instead of INSERT", 0) \
M(Bool, output_format_sql_insert_quote_names, true, "Quote column names with '`' characters", 0) \
\
M(Bool, output_format_bson_string_as_string, false, "Use BSON String type instead of Binary for String columns.", 0) \
M(Bool, input_format_bson_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format BSON.", 0) \
// End of FORMAT_FACTORY_SETTINGS
// Please add settings non-related to formats into the COMMON_SETTINGS above.

View File

@ -284,7 +284,7 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
std::unordered_set<String> attribute_names;
std::vector<DictionaryAttribute> res_attributes;
const FormatSettings format_settings;
const FormatSettings format_settings = {};
for (const auto & config_elem : config_elems)
{

View File

@ -62,7 +62,7 @@ struct ExternalQueryBuilder
private:
const FormatSettings format_settings;
const FormatSettings format_settings = {};
void composeLoadAllQuery(WriteBuffer & out) const;

View File

@ -74,7 +74,6 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
// Poco/MongoDB/BSONWriter.h:54: void writeCString(const std::string & value);
// src/IO/WriteHelpers.h:146 #define writeCString(s, buf)
#include <IO/WriteHelpers.h>
#include <Processors/Transforms/MongoDBSource.h>
namespace DB

View File

@ -1,5 +1,6 @@
#pragma once
#include <Processors/Transforms/MongoDBSource.h>
#include <Core/Block.h>
#include "DictionaryStructure.h"

View File

@ -4,7 +4,10 @@
namespace DB
{
DiskDecorator::DiskDecorator(const DiskPtr & delegate_) : delegate(delegate_)
DiskDecorator::DiskDecorator(const DiskPtr & delegate_)
: IDisk(/* name_= */ "<decorator>")
, delegate(delegate_)
{
}
@ -226,9 +229,9 @@ void DiskDecorator::shutdown()
delegate->shutdown();
}
void DiskDecorator::startup(ContextPtr context)
void DiskDecorator::startupImpl(ContextPtr context)
{
delegate->startup(context);
delegate->startupImpl(context);
}
void DiskDecorator::applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map)

View File

@ -81,7 +81,7 @@ public:
void onFreeze(const String & path) override;
SyncGuardPtr getDirectorySyncGuard(const String & path) const override;
void shutdown() override;
void startup(ContextPtr context) override;
void startupImpl(ContextPtr context) override;
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override;
bool supportsCache() const override { return delegate->supportsCache(); }

View File

@ -210,7 +210,7 @@ DiskEncrypted::DiskEncrypted(
DiskEncrypted::DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_)
: DiskDecorator(settings_->wrapped_disk)
, name(name_)
, encrypted_name(name_)
, disk_path(settings_->disk_path)
, disk_absolute_path(settings_->wrapped_disk->getPath() + settings_->disk_path)
, current_settings(std::move(settings_))
@ -369,15 +369,19 @@ void DiskEncrypted::applyNewSettings(
current_settings.set(std::move(new_settings));
}
void registerDiskEncrypted(DiskFactory & factory)
void registerDiskEncrypted(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr /*context*/,
const DisksMap & map) -> DiskPtr
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & map) -> DiskPtr
{
return std::make_shared<DiskEncrypted>(name, config, config_prefix, map);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
DiskPtr disk = std::make_shared<DiskEncrypted>(name, config, config_prefix, map);
disk->startup(context, skip_access_check);
return disk;
};
factory.registerDiskType("encrypted", creator);
}

View File

@ -33,7 +33,7 @@ public:
DiskEncrypted(const String & name_, const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_, const DisksMap & map_);
DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_);
const String & getName() const override { return name; }
const String & getName() const override { return encrypted_name; }
const String & getPath() const override { return disk_absolute_path; }
ReservationPtr reserve(UInt64 bytes) override;
@ -261,7 +261,7 @@ private:
return disk_path + path;
}
const String name;
const String encrypted_name;
const String disk_path;
const String disk_absolute_path;
MultiVersion<DiskEncryptedSettings> current_settings;

View File

@ -500,7 +500,7 @@ void DiskLocal::applyNewSettings(const Poco::Util::AbstractConfiguration & confi
}
DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_)
: name(name_)
: IDisk(name_)
, disk_path(path_)
, keep_free_space_bytes(keep_free_space_bytes_)
, logger(&Poco::Logger::get("DiskLocal"))
@ -528,26 +528,6 @@ DataSourceDescription DiskLocal::getDataSourceDescription() const
return data_source_description;
}
void DiskLocal::startup(ContextPtr)
{
try
{
broken = false;
disk_checker_magic_number = -1;
disk_checker_can_check_read = true;
readonly = !setup();
}
catch (...)
{
tryLogCurrentException(logger, fmt::format("Disk {} is marked as broken during startup", name));
broken = true;
/// Disk checker is disabled when failing to start up.
disk_checker_can_check_read = false;
}
if (disk_checker && disk_checker_can_check_read)
disk_checker->startup();
}
void DiskLocal::shutdown()
{
if (disk_checker)
@ -641,18 +621,30 @@ DiskObjectStoragePtr DiskLocal::createDiskObjectStorage()
);
}
bool DiskLocal::setup()
void DiskLocal::checkAccessImpl(const String & path)
{
try
{
fs::create_directories(disk_path);
if (!FS::canWrite(disk_path))
{
LOG_ERROR(logger, "Cannot write to the root directory of disk {} ({}).", name, disk_path);
readonly = true;
return;
}
}
catch (...)
{
LOG_ERROR(logger, "Cannot create the directory of disk {} ({}).", name, disk_path);
throw;
LOG_ERROR(logger, "Cannot create the root directory of disk {} ({}).", name, disk_path);
readonly = true;
return;
}
IDisk::checkAccessImpl(path);
}
void DiskLocal::setup()
{
try
{
if (!FS::canRead(disk_path))
@ -666,7 +658,7 @@ bool DiskLocal::setup()
/// If disk checker is disabled, just assume RW by default.
if (!disk_checker)
return true;
return;
try
{
@ -690,6 +682,7 @@ bool DiskLocal::setup()
/// Try to create a new checker file. The disk status can be either broken or readonly.
if (disk_checker_magic_number == -1)
{
try
{
pcg32_fast rng(randomSeed());
@ -709,12 +702,33 @@ bool DiskLocal::setup()
disk_checker_path,
name);
disk_checker_can_check_read = false;
return true;
return;
}
}
if (disk_checker_magic_number == -1)
throw Exception("disk_checker_magic_number is not initialized. It's a bug", ErrorCodes::LOGICAL_ERROR);
return true;
}
void DiskLocal::startupImpl(ContextPtr)
{
broken = false;
disk_checker_magic_number = -1;
disk_checker_can_check_read = true;
try
{
setup();
}
catch (...)
{
tryLogCurrentException(logger, fmt::format("Disk {} is marked as broken during startup", name));
broken = true;
/// Disk checker is disabled when failing to start up.
disk_checker_can_check_read = false;
}
if (disk_checker && disk_checker_can_check_read)
disk_checker->startup();
}
struct stat DiskLocal::stat(const String & path) const
@ -741,13 +755,14 @@ MetadataStoragePtr DiskLocal::getMetadataStorage()
std::static_pointer_cast<IDisk>(shared_from_this()), object_storage, getPath());
}
void registerDiskLocal(DiskFactory & factory)
void registerDiskLocal(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & map) -> DiskPtr
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & map) -> DiskPtr
{
String path;
UInt64 keep_free_space_bytes;
@ -757,9 +772,10 @@ void registerDiskLocal(DiskFactory & factory)
if (path == disk_ptr->getPath())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} and disk {} cannot have the same path ({})", name, disk_name, path);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
std::shared_ptr<IDisk> disk
= std::make_shared<DiskLocal>(name, path, keep_free_space_bytes, context, config.getUInt("local_disk_check_period_ms", 0));
disk->startup(context);
disk->startup(context, skip_access_check);
return std::make_shared<DiskRestartProxy>(disk);
};
factory.registerDiskType("local", creator);

View File

@ -28,8 +28,6 @@ public:
ContextPtr context,
UInt64 local_disk_check_period_ms);
const String & getName() const override { return name; }
const String & getPath() const override { return disk_path; }
ReservationPtr reserve(UInt64 bytes) override;
@ -112,8 +110,9 @@ public:
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap &) override;
bool isBroken() const override { return broken; }
bool isReadOnly() const override { return readonly; }
void startup(ContextPtr) override;
void startupImpl(ContextPtr context) override;
void shutdown() override;
@ -133,17 +132,19 @@ public:
MetadataStoragePtr getMetadataStorage() override;
protected:
void checkAccessImpl(const String & path) override;
private:
std::optional<UInt64> tryReserve(UInt64 bytes);
/// Setup disk for healthy check. Returns true if it's read-write, false if read-only.
/// Set up the disk for the health check.
/// Throw an exception if it's not possible to set up the necessary files and directories.
bool setup();
void setup();
/// Read magic number from disk checker file. Return std::nullopt if exception happens.
std::optional<UInt32> readDiskCheckerMagicNumber() const noexcept;
const String name;
const String disk_path;
const String disk_checker_path = ".disk_checker_file";
std::atomic<UInt64> keep_free_space_bytes;

View File

@ -141,6 +141,11 @@ private:
};
DiskMemory::DiskMemory(const String & name_)
: IDisk(name_)
, disk_path("memory(" + name_ + ')')
{}
ReservationPtr DiskMemory::reserve(UInt64 /*bytes*/)
{
throw Exception("Method reserve is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
@ -456,13 +461,20 @@ MetadataStoragePtr DiskMemory::getMetadataStorage()
using DiskMemoryPtr = std::shared_ptr<DiskMemory>;
void registerDiskMemory(DiskFactory & factory)
void registerDiskMemory(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & /*config*/,
const String & /*config_prefix*/,
ContextPtr /*context*/,
const DisksMap & /*map*/) -> DiskPtr { return std::make_shared<DiskMemory>(name); };
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
DiskPtr disk = std::make_shared<DiskMemory>(name);
disk->startup(context, skip_access_check);
return disk;
};
factory.registerDiskType("memory", creator);
}

View File

@ -8,7 +8,7 @@
namespace DB
{
class DiskMemory;
class ReadBufferFromFileBase;
class WriteBufferFromFileBase;
@ -22,9 +22,7 @@ class WriteBufferFromFileBase;
class DiskMemory : public IDisk
{
public:
explicit DiskMemory(const String & name_) : name(name_), disk_path("memory://" + name_ + '/') {}
const String & getName() const override { return name; }
explicit DiskMemory(const String & name_);
const String & getPath() const override { return disk_path; }
@ -121,7 +119,6 @@ private:
};
using Files = std::unordered_map<String, FileData>; /// file path -> file data
const String name;
const String disk_path;
Files files;
mutable std::mutex mutex;

View File

@ -79,7 +79,8 @@ private:
};
DiskRestartProxy::DiskRestartProxy(DiskPtr & delegate_)
: DiskDecorator(delegate_) { }
: DiskDecorator(delegate_)
{}
ReservationPtr DiskRestartProxy::reserve(UInt64 bytes)
{
@ -368,7 +369,8 @@ void DiskRestartProxy::restart(ContextPtr context)
LOG_INFO(log, "Restart lock acquired. Restarting disk {}", DiskDecorator::getName());
DiskDecorator::startup(context);
/// NOTE: access checking will cause deadlock here, so skip it.
DiskDecorator::startup(context, /* skip_access_check= */ true);
LOG_INFO(log, "Disk restarted {}", DiskDecorator::getName());
}

View File

@ -6,6 +6,7 @@
#include <Poco/Logger.h>
#include <Common/logger_useful.h>
#include <Common/setThreadName.h>
#include <Core/ServerUUID.h>
#include <Disks/ObjectStorages/MetadataStorageFromDisk.h>
#include <Disks/ObjectStorages/FakeMetadataStorageFromDisk.h>
#include <Disks/ObjectStorages/LocalObjectStorage.h>
@ -17,6 +18,8 @@ namespace DB
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int CANNOT_READ_ALL_DATA;
extern const int LOGICAL_ERROR;
}
bool IDisk::isDirectoryEmpty(const String & path) const
@ -126,4 +129,87 @@ SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const
return nullptr;
}
void IDisk::startup(ContextPtr context, bool skip_access_check)
{
if (!skip_access_check)
{
if (isReadOnly())
{
LOG_DEBUG(&Poco::Logger::get("IDisk"),
"Skip access check for disk {} (read-only disk).",
getName());
}
else
checkAccess();
}
startupImpl(context);
}
void IDisk::checkAccess()
{
DB::UUID server_uuid = DB::ServerUUID::get();
if (server_uuid == DB::UUIDHelpers::Nil)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Server UUID is not initialized");
const String path = fmt::format("clickhouse_access_check_{}", DB::toString(server_uuid));
checkAccessImpl(path);
}
/// NOTE: should we mark the disk read-only if the write/unlink fails instead of throwing?
void IDisk::checkAccessImpl(const String & path)
try
{
const std::string_view payload("test", 4);
/// write
{
auto file = writeFile(path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
try
{
file->write(payload.data(), payload.size());
}
catch (...)
{
/// Log current exception, because finalize() can throw a different exception.
tryLogCurrentException(__PRETTY_FUNCTION__);
file->finalize();
throw;
}
}
/// read
{
auto file = readFile(path);
String buf(payload.size(), '0');
file->readStrict(buf.data(), buf.size());
if (buf != payload)
{
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA,
"Content of {}::{} does not matches after read ({} vs {})", name, path, buf, payload);
}
}
/// read with offset
{
auto file = readFile(path);
auto offset = 2;
String buf(payload.size() - offset, '0');
file->seek(offset, 0);
file->readStrict(buf.data(), buf.size());
if (buf != payload.substr(offset))
{
throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA,
"Content of {}::{} does not matches after read with offset ({} vs {})", name, path, buf, payload.substr(offset));
}
}
/// remove
removeFile(path);
}
catch (Exception & e)
{
e.addMessage(fmt::format("While checking access for disk {}", name));
throw;
}
}
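
The unified IDisk::checkAccess / checkAccessImpl above replaces the per-backend checkWriteAccess/checkReadAccess helpers that the Azure and S3 register functions used to carry (their removal appears further down in this diff), and the probe file name now embeds the server UUID (clickhouse_access_check_<uuid>) so servers sharing a disk do not collide. A minimal standalone sketch of the same write / read / read-with-offset / remove sequence, using only the C++ standard library rather than ClickHouse's IDisk API (the function name, error messages and payload are illustrative):

#include <cstdio>
#include <fstream>
#include <stdexcept>
#include <string>

/// Standalone sketch of the access probe: write a payload, read it back,
/// read it again at an offset, then remove the probe file.
void checkAccessSketch(const std::string & path)
{
    const std::string payload = "test";

    /// write
    {
        std::ofstream out(path, std::ios::binary | std::ios::trunc);
        out.write(payload.data(), static_cast<std::streamsize>(payload.size()));
        if (!out)
            throw std::runtime_error("No write access: " + path);
    }

    /// read
    {
        std::ifstream in(path, std::ios::binary);
        std::string buf(payload.size(), '0');
        in.read(buf.data(), static_cast<std::streamsize>(buf.size()));
        if (!in || buf != payload)
            throw std::runtime_error("Content mismatch after read: " + path);
    }

    /// read with offset
    {
        std::ifstream in(path, std::ios::binary);
        const size_t offset = 2;
        in.seekg(static_cast<std::streamoff>(offset));
        std::string buf(payload.size() - offset, '0');
        in.read(buf.data(), static_cast<std::streamsize>(buf.size()));
        if (!in || buf != payload.substr(offset))
            throw std::runtime_error("Content mismatch after offset read: " + path);
    }

    /// remove
    std::remove(path.c_str());
}

As in IDisk::checkAccessImpl, any failure surfaces as an exception; the real implementation additionally wraps it with "While checking access for disk <name>".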

View File

@ -107,8 +107,9 @@ class IDisk : public Space
{
public:
/// Default constructor.
explicit IDisk(std::shared_ptr<Executor> executor_ = std::make_shared<SyncExecutor>())
: executor(executor_)
explicit IDisk(const String & name_, std::shared_ptr<Executor> executor_ = std::make_shared<SyncExecutor>())
: name(name_)
, executor(executor_)
{
}
@ -121,6 +122,9 @@ public:
/// It's not required to be a local filesystem path.
virtual const String & getPath() const = 0;
/// Return disk name.
const String & getName() const override { return name; }
/// Total available space on the disk.
virtual UInt64 getTotalSpace() const = 0;
@ -316,8 +320,11 @@ public:
/// Invoked when Global Context is shutdown.
virtual void shutdown() {}
/// Performs action on disk startup.
virtual void startup(ContextPtr) {}
/// Performs access check and custom action on disk startup.
void startup(ContextPtr context, bool skip_access_check);
/// Performs custom action on disk startup.
virtual void startupImpl(ContextPtr) {}
/// Return some uniq string for file, overrode for IDiskRemote
/// Required for distinguish different copies of the same part on remote disk
@ -400,6 +407,8 @@ public:
protected:
friend class DiskDecorator;
const String name;
/// Returns executor to perform asynchronous operations.
virtual Executor & getExecutor() { return *executor; }
@ -408,8 +417,13 @@ protected:
/// A derived class may override copy() to provide a faster implementation.
void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir = true);
virtual void checkAccessImpl(const String & path);
private:
std::shared_ptr<Executor> executor;
/// Check access to the disk.
void checkAccess();
};
using Disks = std::vector<DiskPtr>;
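
The header change above is a template-method split: the public startup(context, skip_access_check) is now non-virtual and owns the access check, while concrete disks override only startupImpl() and checkAccessImpl(). A compressed illustration of that shape (an illustrative class, not ClickHouse code; the real IDisk also skips the probe for read-only disks and passes a ContextPtr through):

#include <iostream>

class DiskBase
{
public:
    virtual ~DiskBase() = default;

    /// Non-virtual entry point: runs the common access probe, then the
    /// disk-specific startup hook.
    void startup(bool skip_access_check)
    {
        if (!skip_access_check)
            checkAccess();
        startupImpl();
    }

protected:
    virtual void checkAccessImpl() { std::cout << "default access probe\n"; }
    virtual void startupImpl() {}

private:
    void checkAccess() { checkAccessImpl(); }
};

class DiskExample : public DiskBase
{
protected:
    void startupImpl() override { std::cout << "disk-specific startup\n"; }
};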

View File

@ -42,7 +42,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
if (read_hint.has_value())
estimated_size = *read_hint;
else if (file_size.has_value())
estimated_size = file_size.has_value() ? *file_size : 0;
estimated_size = *file_size;
if (!existing_memory
&& settings.local_fs_method == LocalFSReadMethod::mmap
@ -158,7 +158,15 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
#endif
ProfileEvents::increment(ProfileEvents::CreatedReadBufferOrdinary);
return create(settings.local_fs_buffer_size, flags);
size_t buffer_size = settings.local_fs_buffer_size;
/// Check if the buffer can be smaller than default
if (read_hint.has_value() && *read_hint > 0 && *read_hint < buffer_size)
buffer_size = *read_hint;
if (file_size.has_value() && *file_size < buffer_size)
buffer_size = *file_size;
return create(buffer_size, flags);
}
}
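
The last hunk above shrinks the read buffer when the caller already knows that the read, or the whole file, is small. The same rule extracted into a standalone helper for clarity (the function name is illustrative, not part of ClickHouse's API):

#include <cstddef>
#include <optional>

/// Start from the default buffer size and shrink it when a positive read hint
/// or a known file size is smaller.
size_t chooseBufferSize(size_t default_size, std::optional<size_t> read_hint, std::optional<size_t> file_size)
{
    size_t buffer_size = default_size;
    if (read_hint.has_value() && *read_hint > 0 && *read_hint < buffer_size)
        buffer_size = *read_hint;
    if (file_size.has_value() && *file_size < buffer_size)
        buffer_size = *file_size;
    return buffer_size;
}

For example, chooseBufferSize(1 << 20, 128, std::nullopt) returns 128, so reading a tiny metadata file no longer allocates a megabyte-sized buffer. The UnlinkFileOperation change below, which now passes disk.getFileSize(path) into readFile, exists to feed this logic a file size.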

View File

@ -17,55 +17,9 @@
namespace DB
{
namespace ErrorCodes
void registerDiskAzureBlobStorage(DiskFactory & factory, bool global_skip_access_check)
{
extern const int PATH_ACCESS_DENIED;
}
namespace
{
constexpr char test_file[] = "test.txt";
constexpr char test_str[] = "test";
constexpr size_t test_str_size = 4;
void checkWriteAccess(IDisk & disk)
{
auto file = disk.writeFile(test_file, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
file->write(test_str, test_str_size);
}
void checkReadAccess(IDisk & disk)
{
auto file = disk.readFile(test_file);
String buf(test_str_size, '0');
file->readStrict(buf.data(), test_str_size);
if (buf != test_str)
throw Exception("No read access to disk", ErrorCodes::PATH_ACCESS_DENIED);
}
void checkReadWithOffset(IDisk & disk)
{
auto file = disk.readFile(test_file);
auto offset = 2;
auto test_size = test_str_size - offset;
String buf(test_size, '0');
file->seek(offset, 0);
file->readStrict(buf.data(), test_size);
if (buf != test_str + offset)
throw Exception("Failed to read file with offset", ErrorCodes::PATH_ACCESS_DENIED);
}
void checkRemoveAccess(IDisk & disk)
{
disk.removeFile(test_file);
}
}
void registerDiskAzureBlobStorage(DiskFactory & factory)
{
auto creator = [](
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
@ -94,15 +48,8 @@ void registerDiskAzureBlobStorage(DiskFactory & factory)
copy_thread_pool_size
);
if (!config.getBool(config_prefix + ".skip_access_check", false))
{
checkWriteAccess(*azure_blob_storage_disk);
checkReadAccess(*azure_blob_storage_disk);
checkReadWithOffset(*azure_blob_storage_disk);
checkRemoveAccess(*azure_blob_storage_disk);
}
azure_blob_storage_disk->startup(context);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
azure_blob_storage_disk->startup(context, skip_access_check);
return std::make_shared<DiskRestartProxy>(azure_blob_storage_disk);
};
@ -117,7 +64,7 @@ void registerDiskAzureBlobStorage(DiskFactory & factory)
namespace DB
{
void registerDiskAzureBlobStorage(DiskFactory &) {}
void registerDiskAzureBlobStorage(DiskFactory &, bool /* global_skip_access_check */) {}
}

View File

@ -16,7 +16,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
void registerDiskCache(DiskFactory & factory)
void registerDiskCache(DiskFactory & factory, bool /* global_skip_access_check */)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & config,

View File

@ -109,8 +109,7 @@ DiskObjectStorage::DiskObjectStorage(
ObjectStoragePtr object_storage_,
bool send_metadata_,
uint64_t thread_pool_size_)
: IDisk(getAsyncExecutor(log_name, thread_pool_size_))
, name(name_)
: IDisk(name_, getAsyncExecutor(log_name, thread_pool_size_))
, object_storage_root_path(object_storage_root_path_)
, log (&Poco::Logger::get("DiskObjectStorage(" + log_name + ")"))
, metadata_storage(std::move(metadata_storage_))
@ -420,9 +419,8 @@ void DiskObjectStorage::shutdown()
LOG_INFO(log, "Disk {} shut down", name);
}
void DiskObjectStorage::startup(ContextPtr context)
void DiskObjectStorage::startupImpl(ContextPtr context)
{
LOG_INFO(log, "Starting up disk {}", name);
object_storage->startup();

View File

@ -45,8 +45,6 @@ public:
bool supportParallelWrite() const override { return object_storage->supportParallelWrite(); }
const String & getName() const override { return name; }
const String & getPath() const override { return metadata_storage->getPath(); }
StoredObjects getStorageObjects(const String & local_path) const override;
@ -138,7 +136,7 @@ public:
void shutdown() override;
void startup(ContextPtr context) override;
void startupImpl(ContextPtr context) override;
ReservationPtr reserve(UInt64 bytes) override;
@ -212,7 +210,6 @@ private:
/// execution.
DiskTransactionPtr createObjectStorageTransaction();
const String name;
const String object_storage_root_path;
Poco::Logger * log;

View File

@ -14,13 +14,14 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
void registerDiskHDFS(DiskFactory & factory)
void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context_,
const DisksMap & /*map*/) -> DiskPtr
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
String uri{config.getString(config_prefix + ".endpoint")};
checkHDFSURL(uri);
@ -31,19 +32,20 @@ void registerDiskHDFS(DiskFactory & factory)
std::unique_ptr<HDFSObjectStorageSettings> settings = std::make_unique<HDFSObjectStorageSettings>(
config.getUInt64(config_prefix + ".min_bytes_for_seek", 1024 * 1024),
config.getInt(config_prefix + ".objects_chunk_size_to_delete", 1000),
context_->getSettingsRef().hdfs_replication
context->getSettingsRef().hdfs_replication
);
/// FIXME Cache currently unsupported :(
ObjectStoragePtr hdfs_storage = std::make_unique<HDFSObjectStorage>(uri, std::move(settings), config);
auto [_, metadata_disk] = prepareForLocalMetadata(name, config, config_prefix, context_);
auto [_, metadata_disk] = prepareForLocalMetadata(name, config, config_prefix, context);
auto metadata_storage = std::make_shared<MetadataStorageFromDisk>(metadata_disk, uri);
uint64_t copy_thread_pool_size = config.getUInt(config_prefix + ".thread_pool_size", 16);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
DiskPtr disk_result = std::make_shared<DiskObjectStorage>(
DiskPtr disk = std::make_shared<DiskObjectStorage>(
name,
uri,
"DiskHDFS",
@ -51,8 +53,9 @@ void registerDiskHDFS(DiskFactory & factory)
std::move(hdfs_storage),
/* send_metadata = */ false,
copy_thread_pool_size);
disk->startup(context, skip_access_check);
return std::make_shared<DiskRestartProxy>(disk_result);
return std::make_shared<DiskRestartProxy>(disk);
};
factory.registerDiskType("hdfs", creator);

View File

@ -4,6 +4,7 @@
#include <Common/getRandomASCIIString.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <optional>
#include <ranges>
#include <filesystem>
@ -62,7 +63,7 @@ UnlinkFileOperation::UnlinkFileOperation(const std::string & path_, IDisk & disk
void UnlinkFileOperation::execute(std::unique_lock<std::shared_mutex> &)
{
auto buf = disk.readFile(path);
auto buf = disk.readFile(path, ReadSettings{}, std::nullopt, disk.getFileSize(path));
readStringUntilEOF(prev_data, *buf);
disk.removeFile(path);
}

View File

@ -658,7 +658,7 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
return std::make_unique<S3ObjectStorage>(
std::move(new_client), std::move(new_s3_settings),
version_id, s3_capabilities, new_namespace,
S3::URI(Poco::URI(config.getString(config_prefix + ".endpoint"))).endpoint);
config.getString(config_prefix + ".endpoint"));
}
}

View File

@ -137,7 +137,7 @@ std::unique_ptr<Aws::S3::S3Client> getClient(
settings.request_settings.get_request_throttler,
settings.request_settings.put_request_throttler);
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
S3::URI uri(config.getString(config_prefix + ".endpoint"));
if (uri.key.back() != '/')
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);

View File

@ -22,6 +22,7 @@
#include <IO/S3Common.h>
#include <Storages/StorageS3Settings.h>
#include <Core/ServerUUID.h>
namespace DB
@ -30,92 +31,80 @@ namespace DB
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int PATH_ACCESS_DENIED;
extern const int LOGICAL_ERROR;
}
namespace
{
void checkWriteAccess(IDisk & disk)
class CheckAccess
{
auto file = disk.writeFile("test_acl", DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
try
{
file->write("test", 4);
}
catch (...)
{
/// Log current exception, because finalize() can throw a different exception.
tryLogCurrentException(__PRETTY_FUNCTION__);
file->finalize();
throw;
}
}
void checkReadAccess(const String & disk_name, IDisk & disk)
{
auto file = disk.readFile("test_acl");
String buf(4, '0');
file->readStrict(buf.data(), 4);
if (buf != "test")
throw Exception("No read access to S3 bucket in disk " + disk_name, ErrorCodes::PATH_ACCESS_DENIED);
}
void checkRemoveAccess(IDisk & disk)
{
disk.removeFile("test_acl");
}
bool checkBatchRemoveIsMissing(S3ObjectStorage & storage, const String & key_with_trailing_slash)
{
StoredObject object(key_with_trailing_slash + "_test_remove_objects_capability");
try
{
auto file = storage.writeObject(object, WriteMode::Rewrite);
file->write("test", 4);
file->finalize();
}
catch (...)
public:
static bool checkBatchRemove(S3ObjectStorage & storage, const String & key_with_trailing_slash)
{
/// NOTE: key_with_trailing_slash is the disk prefix, it is required
/// because access is done via S3ObjectStorage not via IDisk interface
/// (since we don't have disk yet).
const String path = fmt::format("{}clickhouse_remove_objects_capability_{}", key_with_trailing_slash, getServerUUID());
StoredObject object(path);
try
{
storage.removeObject(object);
auto file = storage.writeObject(object, WriteMode::Rewrite);
file->write("test", 4);
file->finalize();
}
catch (...)
{
try
{
storage.removeObject(object);
}
catch (...)
{
}
return true; /// We don't have write access, therefore no information about batch remove.
}
return false; /// We don't have write access, therefore no information about batch remove.
}
try
{
/// Uses `DeleteObjects` request (batch delete).
storage.removeObjects({object});
return false;
}
catch (const Exception &)
{
try
{
storage.removeObject(object);
/// Uses `DeleteObjects` request (batch delete).
storage.removeObjects({object});
return true;
}
catch (...)
catch (const Exception &)
{
try
{
storage.removeObject(object);
}
catch (...)
{
}
return false;
}
return true;
}
}
}
void registerDiskS3(DiskFactory & factory)
{
auto creator = [](const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
private:
static String getServerUUID()
{
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
DB::UUID server_uuid = DB::ServerUUID::get();
if (server_uuid == DB::UUIDHelpers::Nil)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Server UUID is not initialized");
return DB::toString(server_uuid);
}
};
}
void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [global_skip_access_check](
const String & name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
S3::URI uri(config.getString(config_prefix + ".endpoint"));
if (uri.key.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());
@ -144,12 +133,12 @@ void registerDiskS3(DiskFactory & factory)
metadata_storage = std::make_shared<MetadataStorageFromDisk>(metadata_disk, uri.key);
}
bool skip_access_check = config.getBool(config_prefix + ".skip_access_check", false);
/// NOTE: should we still perform this check for clickhouse-disks?
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
if (!skip_access_check)
{
/// If `support_batch_delete` is turned on (default), check and possibly switch it off.
if (s3_capabilities.support_batch_delete && checkBatchRemoveIsMissing(*s3_storage, uri.key))
if (s3_capabilities.support_batch_delete && !CheckAccess::checkBatchRemove(*s3_storage, uri.key))
{
LOG_WARNING(
&Poco::Logger::get("registerDiskS3"),
@ -165,7 +154,7 @@ void registerDiskS3(DiskFactory & factory)
bool send_metadata = config.getBool(config_prefix + ".send_metadata", false);
uint64_t copy_thread_pool_size = config.getUInt(config_prefix + ".thread_pool_size", 16);
std::shared_ptr<DiskObjectStorage> s3disk = std::make_shared<DiskObjectStorage>(
DiskObjectStoragePtr s3disk = std::make_shared<DiskObjectStorage>(
name,
uri.key,
type == "s3" ? "DiskS3" : "DiskS3Plain",
@ -174,15 +163,7 @@ void registerDiskS3(DiskFactory & factory)
send_metadata,
copy_thread_pool_size);
/// This code is used only to check access to the corresponding disk.
if (!skip_access_check)
{
checkWriteAccess(*s3disk);
checkReadAccess(name, *s3disk);
checkRemoveAccess(*s3disk);
}
s3disk->startup(context);
s3disk->startup(context, skip_access_check);
std::shared_ptr<IDisk> disk_result = s3disk;
@ -196,6 +177,6 @@ void registerDiskS3(DiskFactory & factory)
#else
void registerDiskS3(DiskFactory &) {}
void registerDiskS3(DiskFactory &, bool /* global_skip_access_check */) {}
#endif
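
CheckAccess::checkBatchRemove above is a startup capability probe: it writes a marker object whose key embeds the server UUID, then tries the batch DeleteObjects path; if the endpoint rejects it, the register function switches support_batch_delete off and the disk falls back to per-object deletes. The same pattern with the storage calls abstracted away (all names here are illustrative, not the S3ObjectStorage API):

#include <functional>
#include <string>
#include <vector>

/// Returns false only when batch removal was demonstrably rejected;
/// "cannot write the probe object" counts as "no information", so batch
/// delete stays enabled in that case.
bool probeBatchRemove(
    const std::function<bool(const std::string &)> & write_object,
    const std::function<bool(const std::vector<std::string> &)> & remove_objects,
    const std::function<void(const std::string &)> & remove_object,
    const std::string & probe_key)
{
    if (!write_object(probe_key))
        return true;                /// No write access: assume batch delete works.

    if (remove_objects({probe_key}))
        return true;                /// Batch delete succeeded.

    remove_object(probe_key);       /// Clean up with a single delete.
    return false;                   /// Batch delete rejected: disable it.
}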

View File

@ -14,15 +14,17 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
void registerDiskWebServer(DiskFactory & factory)
void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check)
{
auto creator = [](const String & disk_name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
auto creator = [global_skip_access_check](
const String & disk_name,
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
String uri{config.getString(config_prefix + ".endpoint")};
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
if (!uri.ends_with('/'))
throw Exception(
@ -41,7 +43,7 @@ void registerDiskWebServer(DiskFactory & factory)
auto metadata_storage = std::make_shared<MetadataStorageFromStaticFilesWebServer>(assert_cast<const WebObjectStorage &>(*object_storage));
std::string root_path;
return std::make_shared<DiskObjectStorage>(
DiskPtr disk = std::make_shared<DiskObjectStorage>(
disk_name,
root_path,
"DiskWebServer",
@ -49,6 +51,8 @@ void registerDiskWebServer(DiskFactory & factory)
object_storage,
/* send_metadata */false,
/* threadpool_size */16);
disk->startup(context, skip_access_check);
return disk;
};
factory.registerDiskType("web", creator);

View File

@ -7,55 +7,55 @@
namespace DB
{
void registerDiskLocal(DiskFactory & factory);
void registerDiskMemory(DiskFactory & factory);
void registerDiskLocal(DiskFactory & factory, bool global_skip_access_check);
void registerDiskMemory(DiskFactory & factory, bool global_skip_access_check);
#if USE_AWS_S3
void registerDiskS3(DiskFactory & factory);
void registerDiskS3(DiskFactory & factory, bool global_skip_access_check);
#endif
#if USE_AZURE_BLOB_STORAGE
void registerDiskAzureBlobStorage(DiskFactory & factory);
void registerDiskAzureBlobStorage(DiskFactory & factory, bool global_skip_access_check);
#endif
#if USE_SSL
void registerDiskEncrypted(DiskFactory & factory);
void registerDiskEncrypted(DiskFactory & factory, bool global_skip_access_check);
#endif
#if USE_HDFS
void registerDiskHDFS(DiskFactory & factory);
void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check);
#endif
void registerDiskWebServer(DiskFactory & factory);
void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check);
void registerDiskCache(DiskFactory & factory);
void registerDiskCache(DiskFactory & factory, bool global_skip_access_check);
void registerDisks()
void registerDisks(bool global_skip_access_check)
{
auto & factory = DiskFactory::instance();
registerDiskLocal(factory);
registerDiskMemory(factory);
registerDiskLocal(factory, global_skip_access_check);
registerDiskMemory(factory, global_skip_access_check);
#if USE_AWS_S3
registerDiskS3(factory);
registerDiskS3(factory, global_skip_access_check);
#endif
#if USE_AZURE_BLOB_STORAGE
registerDiskAzureBlobStorage(factory);
registerDiskAzureBlobStorage(factory, global_skip_access_check);
#endif
#if USE_SSL
registerDiskEncrypted(factory);
registerDiskEncrypted(factory, global_skip_access_check);
#endif
#if USE_HDFS
registerDiskHDFS(factory);
registerDiskHDFS(factory, global_skip_access_check);
#endif
registerDiskWebServer(factory);
registerDiskWebServer(factory, global_skip_access_check);
registerDiskCache(factory);
registerDiskCache(factory, global_skip_access_check);
}
}
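
registerDisks now threads a single global_skip_access_check flag into every register function, where it is OR-ed with each disk's own skip_access_check setting before startup() runs. A hypothetical caller sketch (the include path and the use case are assumptions, not taken from this diff):

#include <Disks/registerDisks.h>

int main()
{
    /// A metadata-only utility that must never write probe files to any disk
    /// could pass true here; the server keeps the probes by passing false.
    DB::registerDisks(/* global_skip_access_check= */ true);
    return 0;
}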

Some files were not shown because too many files have changed in this diff.