Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 17:12:03 +00:00)
Merge branch 'master' into dictinct_in_order_optimization
commit 12f5250e86
@@ -34,7 +34,6 @@
* Add two new settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines` to allow skipping a specified number of lines at the beginning of the file in CSV/TSV formats. [#37537](https://github.com/ClickHouse/ClickHouse/pull/37537) ([Kruglov Pavel](https://github.com/Avogar)).
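As a usage sketch of the new settings (the file name and column schema here are hypothetical, not taken from the PR):

```sql
-- Skip two preamble lines before parsing the CSV rows.
SELECT *
FROM file('data.csv', 'CSV', 'id UInt32, name String')
SETTINGS input_format_csv_skip_first_lines = 2;
```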
* The `showCertificate` function shows the current server's SSL certificate. [#37540](https://github.com/ClickHouse/ClickHouse/pull/37540) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
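A minimal sketch of calling it, assuming the server is configured with SSL:

```sql
-- Returns the properties of the certificate the server is currently using.
SELECT showCertificate();
```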
* HTTP source for Data Dictionaries in Named Collections is supported. [#37581](https://github.com/ClickHouse/ClickHouse/pull/37581) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Added a new window function `nonNegativeDerivative(metric_column, timestamp_column[, INTERVAL x SECOND])`. [#37628](https://github.com/ClickHouse/ClickHouse/pull/37628) ([Andrey Zvonov](https://github.com/zvonand)).
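Although this merge drops the entry from the changelog section, the signature above suggests usage along these lines; this is a sketch only, and the `metrics` table with its `bytes_read`/`ts` columns is hypothetical:

```sql
-- Non-negative, per-interval rate of change of a counter-like metric.
SELECT
    ts,
    nonNegativeDerivative(bytes_read, ts) OVER (ORDER BY ts) AS read_rate
FROM metrics;
```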
* Implemented changing the comment for `ReplicatedMergeTree` tables. [#37416](https://github.com/ClickHouse/ClickHouse/pull/37416) ([Vasily Nemkov](https://github.com/Enmk)).
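A sketch of the corresponding statement (the table name is illustrative):

```sql
-- MODIFY COMMENT now also works when the table engine is ReplicatedMergeTree.
ALTER TABLE replicated_events MODIFY COMMENT 'Stores replicated click events';
```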
* Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless of whether the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)).
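A usage sketch, assuming a backup was previously created with `ALTER TABLE ... FREEZE WITH NAME` (the backup name is illustrative):

```sql
-- Deletes the frozen parts of this backup even if the source table was already dropped.
SYSTEM UNFREEZE WITH NAME 'backup_2022_06';
```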
@@ -252,10 +252,10 @@ else ()
endif ()

# Optionally split binaries and debug symbols.
-option(INSTALL_STRIPPED_BINARIES "Split binaries and debug symbols" OFF)
-if (INSTALL_STRIPPED_BINARIES)
+option(SPLIT_DEBUG_SYMBOLS "Split binaries and debug symbols" OFF)
+if (SPLIT_DEBUG_SYMBOLS)
    message(STATUS "Will split binaries and debug symbols")
-    set(STRIPPED_BINARIES_OUTPUT "stripped" CACHE STRING "A separate directory for stripped information")
+    set(SPLITTED_DEBUG_SYMBOLS_DIR "stripped" CACHE STRING "A separate directory for stripped information")
endif()

cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd
@@ -15,5 +15,8 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.

## Upcoming events
+* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/286304312/) Please join us for an evening of talks (in English), food and discussion. Featuring talks of ClickHouse in production and at least one on the deep internals of ClickHouse itself.
* [v22.7 Release Webinar](https://clickhouse.com/company/events/v22-7-release-webinar/) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
+* [ClickHouse Meetup at the Cloudflare office in London](https://www.meetup.com/clickhouse-london-user-group/events/286891586/) ClickHouse meetup at the Cloudflare office space in central London
+* [ClickHouse Meetup at the Metoda office in Munich](https://www.meetup.com/clickhouse-meetup-munich/events/286891667/) ClickHouse meetup at the Metoda office in Munich
@@ -40,10 +40,16 @@ if [[ $(./clickhouse client --query "EXISTS hits") == '1' && $(./clickhouse clie
    echo "Dataset already downloaded"
else
    echo "Will download the dataset"
+    if [ "`uname`" = "Darwin" ]
+    then
+        ./clickhouse client --receive_timeout 1000 --max_insert_threads $(sysctl -n hw.ncpu) --progress --query "
+        CREATE OR REPLACE TABLE hits ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime)
+        AS SELECT * FROM url('https://datasets.clickhouse.com/hits/native/hits_100m_obfuscated_{0..255}.native.zst')"
+    else
    ./clickhouse client --receive_timeout 1000 --max_insert_threads $(nproc || 4) --progress --query "
    CREATE OR REPLACE TABLE hits ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID), EventTime)
    AS SELECT * FROM url('https://datasets.clickhouse.com/hits/native/hits_100m_obfuscated_{0..255}.native.zst')"
+    fi
    ./clickhouse client --query "SELECT 'The dataset size is: ', count() FROM hits"
fi
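Once loaded, the script runs the queries from `$QUERIES_FILE` against `hits`. A representative query of that shape (assuming the standard obfuscated hits schema; this particular query is illustrative, not taken from the queries file) is:

```sql
-- Top counters by number of hits.
SELECT CounterID, count() AS c
FROM hits
GROUP BY CounterID
ORDER BY c DESC
LIMIT 10;
```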
@@ -63,7 +69,7 @@ QUERY_NUM=1

cat "$QUERIES_FILE" | sed "s/{table}/hits/g" | while read query; do
    sync
-    if [ "${OS}" = "Darwin" ]
+    if [ "`uname`" = "Darwin" ]
    then
        sudo purge > /dev/null
    else
@@ -90,7 +96,7 @@ echo

touch {cpu_model,cpu,df,memory,memory_total,blk,mdstat,instance}.txt

-if [ "${OS}" = "Darwin" ]
+if [ "`uname`" = "Darwin" ]
then
    echo '----Version, build id-----------'
    ./clickhouse local --query "SELECT format('Version: {}', version())"
@@ -1,4 +1,4 @@
-macro(clickhouse_strip_binary)
+macro(clickhouse_split_debug_symbols)
    set(oneValueArgs TARGET DESTINATION_DIR BINARY_PATH)

    cmake_parse_arguments(STRIP "" "${oneValueArgs}" "" ${ARGN})
contrib/mariadb-connector-c (vendored submodule)
@@ -1 +1 @@
-Subproject commit 5f4034a3a6376416504f17186c55fe401c6d8e5e
+Subproject commit e39608998f5f6944ece9ec61f48e9172ec1de660
@@ -67,7 +67,7 @@ RUN arch=${TARGETARCH:-amd64} \
    && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper

-EXPOSE 2181 10181 44444
+EXPOSE 2181 10181 44444 9181

VOLUME /var/lib/clickhouse /var/log/clickhouse-keeper /etc/clickhouse-keeper
@@ -31,7 +31,7 @@ else
    DO_CHOWN=0
fi

-KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/config.yaml}"
+KEEPER_CONFIG="${KEEPER_CONFIG:-/etc/clickhouse-keeper/keeper_config.xml}"

if [ -f "$KEEPER_CONFIG" ] && ! $gosu test -f "$KEEPER_CONFIG" -a -r "$KEEPER_CONFIG"; then
    echo "Configuration file '$KEEPER_CONFIG' isn't readable by user with id '$USER'"
@@ -202,7 +202,7 @@ def parse_env_variables(
        cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
        cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
        if is_release_build(build_type, package_type, sanitizer, split_binary):
-            cmake_flags.append("-DINSTALL_STRIPPED_BINARIES=ON")
+            cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
            result.append("WITH_PERFORMANCE=1")
            if is_cross_arm:
                cmake_flags.append("-DBUILD_STANDALONE_KEEPER=1")
@@ -42,6 +42,7 @@ function install_packages()
function configure()
{
    # install test configs
+    export USE_DATABASE_ORDINARY=1
    /usr/share/clickhouse-test/config/install.sh

    # we mount tests folder from repo to /usr/share
@@ -1,121 +0,0 @@
## Developer's guide for adding new CMake options

### Don't be obvious. Be informative.

Bad:

```cmake
option (ENABLE_TESTS "Enables testing" OFF)
```

This description is quite useless as it neither gives the viewer any additional information nor explains the option purpose.

Better:

```cmake
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
```

If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or the option has some
pre-conditions, leave a comment above the `option()` line and explain what it does.
The best way would be linking the docs page (if it exists).
The comment is parsed into a separate column (see below).

Even better:

```cmake
# implies ${TESTS_ARE_ENABLED}
# see tests/CMakeLists.txt for implementation detail.
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
```

### If the option's state could produce unwanted (or unusual) result, explicitly warn the user.

Suppose you have an option that may strip debug symbols from the ClickHouse part.
This can speed up the linking process, but produces a binary that cannot be debugged.
In that case, prefer explicitly raising a warning telling the developer that they may be doing something wrong.
Also, such options should be disabled by default where that applies.

Bad:

```cmake
option(STRIP_DEBUG_SYMBOLS_FUNCTIONS
    "Do not generate debugger info for ClickHouse functions."
    ${STRIP_DSF_DEFAULT})

if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
    target_compile_options(clickhouse_functions PRIVATE "-g0")
endif()
```

Better:

```cmake
# Provides faster linking and lower binary size.
# Tradeoff is the inability to debug some source files with e.g. gdb
# (empty stack frames and no local variables).
option(STRIP_DEBUG_SYMBOLS_FUNCTIONS
    "Do not generate debugger info for ClickHouse functions."
    ${STRIP_DSF_DEFAULT})

if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
    message(WARNING "Not generating debugger info for ClickHouse functions")
    target_compile_options(clickhouse_functions PRIVATE "-g0")
endif()
```

### In the option's description, explain WHAT the option does rather than WHY it does something.

The WHY explanation should be placed in the comment.
You may find that the option's name is self-descriptive.

Bad:

```cmake
option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON)
```

Better:

```cmake
# Only applicable for clang.
# Turned off when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimisation" ON)
```

### Don't assume other developers know as much as you do.

In ClickHouse, there are many tools used that an ordinary developer may not know. If you are in doubt, give a link to
the tool's docs. It won't take much of your time.

Bad:

```cmake
option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON)
```

Better (combined with the above hint):

```cmake
# https://clang.llvm.org/docs/ThinLTO.html
# Only applicable for clang.
# Turned off when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimisation" ON)
```

Another example, bad:

```cmake
option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF)
```

Better:

```cmake
# https://github.com/include-what-you-use/include-what-you-use
option (USE_INCLUDE_WHAT_YOU_USE "Reduce unneeded #include s (external tool)" OFF)
```

### Prefer consistent default values.

CMake allows you to pass a plethora of values representing boolean `true/false`, e.g. `1, ON, YES, ...`.
Prefer the `ON/OFF` values, if possible.
@@ -1,27 +0,0 @@
# CMake in ClickHouse

## TL;DR: How to make ClickHouse compile and link faster?

Minimal ClickHouse build example:

```bash
cmake .. \
    -DCMAKE_C_COMPILER=$(which clang-14) \
    -DCMAKE_CXX_COMPILER=$(which clang++-14) \
    -DCMAKE_BUILD_TYPE=Debug \
    -DENABLE_UTILS=OFF \
    -DENABLE_TESTS=OFF
```

## CMake files types

1. ClickHouse's source CMake files (located in the root directory and in `/src`).
2. Arch-dependent CMake files (located in `/cmake/*os_name*`).
3. Libraries finders (search for contrib libraries, located in `/contrib/*/CMakeLists.txt`).
4. Contrib build CMake files (used instead of libraries' own CMake files, located in `/cmake/modules`)

## List of CMake flags

* This list is auto-generated by [this Python script](https://github.com/clickhouse/clickhouse/blob/master/docs/tools/cmake_in_clickhouse_generator.py).
* The flag name is a link to its position in the code.
* If an option's default value is itself an option, it's also a link to its position in this list.
docs/changelogs/v22.6.2.12-stable.md (new file)
@@ -0,0 +1,22 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.6.2.12-stable FIXME as compared to v22.6.1.1985-stable

#### Improvement
* Backported in [#38484](https://github.com/ClickHouse/ClickHouse/issues/38484): Improve the stability for hive storage integration test. Move the data prepare step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#38404](https://github.com/ClickHouse/ClickHouse/issues/38404): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Remove processor description from span attributes - it is not working [#38157](https://github.com/ClickHouse/ClickHouse/pull/38157) ([Ilya Yatsishin](https://github.com/qoega)).
* Checkout full repositories for performance tests [#38327](https://github.com/ClickHouse/ClickHouse/pull/38327) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix 02305_schema_inference_with_globs [#38337](https://github.com/ClickHouse/ClickHouse/pull/38337) ([Kruglov Pavel](https://github.com/Avogar)).
@@ -1,545 +0,0 @@
---
sidebar_position: 69
sidebar_label: CMake in ClickHouse
description: How to make ClickHouse compile and link faster
---

# CMake in ClickHouse

How to make ClickHouse compile and link faster. Minimal ClickHouse build example:

```bash
cmake .. \
    -DCMAKE_C_COMPILER=$(which clang-13) \
    -DCMAKE_CXX_COMPILER=$(which clang++-13) \
    -DCMAKE_BUILD_TYPE=Debug \
    -DENABLE_UTILS=OFF \
    -DENABLE_TESTS=OFF
```

## CMake files types

1. ClickHouse source CMake files (located in the root directory and in /src).
2. Arch-dependent CMake files (located in /cmake/*os_name*).
3. Libraries finders (search for contrib libraries, located in /contrib/*/CMakeLists.txt).
4. Contrib build CMake files (used instead of libraries' own CMake files, located in /cmake/modules)

## List of CMake flags

- The flag name is a link to its position in the code.
- If an option's default value is itself an option, it's also a link to its position in this list.

## ClickHouse modes

| Name | Default value | Description | Comment |
|------|---------------|-------------|---------|
| [`ENABLE_CLICKHOUSE_ALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L10) | `ON` | Enable all ClickHouse modes by default | The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.), each of them may be built and linked as a separate library. If you do not know what modes you need, turn this option OFF and enable SERVER and CLIENT only. |
| [`ENABLE_CLICKHOUSE_BENCHMARK`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L20) | `ENABLE_CLICKHOUSE_ALL` | Queries benchmarking mode | https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark/ |
| [`ENABLE_CLICKHOUSE_CLIENT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L13) | `ENABLE_CLICKHOUSE_ALL` | Client mode (interactive tui/shell that connects to the server) | |
| [`ENABLE_CLICKHOUSE_COMPRESSOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L25) | `ENABLE_CLICKHOUSE_ALL` | Data compressor and decompressor | https://clickhouse.com/docs/en/operations/utilities/clickhouse-compressor/ |
| [`ENABLE_CLICKHOUSE_COPIER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L28) | `ENABLE_CLICKHOUSE_ALL` | Inter-cluster data copying mode | https://clickhouse.com/docs/en/operations/utilities/clickhouse-copier/ |
| [`ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L22) | `ENABLE_CLICKHOUSE_ALL` | Configs processor (extract values etc.) | |
| [`ENABLE_CLICKHOUSE_FORMAT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L30) | `ENABLE_CLICKHOUSE_ALL` | Queries pretty-printer and formatter with syntax highlighting | |
| [`ENABLE_CLICKHOUSE_GIT_IMPORT`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L50) | `ENABLE_CLICKHOUSE_ALL` | A tool to analyze Git repositories | https://presentations.clickhouse.com/matemarketing_2020/ |
| [`ENABLE_CLICKHOUSE_INSTALL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L67) | `OFF` | Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only) | |
| [`ENABLE_CLICKHOUSE_KEEPER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L54) | `ENABLE_CLICKHOUSE_ALL` | ClickHouse alternative to ZooKeeper | |
| [`ENABLE_CLICKHOUSE_KEEPER_CONVERTER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L56) | `ENABLE_CLICKHOUSE_ALL` | Util allows to convert ZooKeeper logs and snapshots into clickhouse-keeper snapshot | |
| [`ENABLE_CLICKHOUSE_LIBRARY_BRIDGE`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L46) | `ENABLE_CLICKHOUSE_ALL` | HTTP-server working like a proxy to Library dictionary source | |
| [`ENABLE_CLICKHOUSE_LOCAL`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L17) | `ENABLE_CLICKHOUSE_ALL` | Local files fast processing mode | https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/ |
| [`ENABLE_CLICKHOUSE_OBFUSCATOR`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L34) | `ENABLE_CLICKHOUSE_ALL` | Table data obfuscator (convert real data to benchmark-ready one) | https://clickhouse.com/docs/en/operations/utilities/clickhouse-obfuscator/ |
| [`ENABLE_CLICKHOUSE_ODBC_BRIDGE`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L40) | `ENABLE_CLICKHOUSE_ALL` | HTTP-server working like a proxy to ODBC driver | |
| [`ENABLE_CLICKHOUSE_SERVER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L12) | `ENABLE_CLICKHOUSE_ALL` | Server mode (main mode) | |
| [`ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER`](https://github.com/clickhouse/clickhouse/blob/master/programs/CMakeLists.txt#L52) | `ENABLE_CLICKHOUSE_ALL` | A tool to export table data files to be later put to a static files web server | |

## External libraries

Note that ClickHouse uses forks of these libraries, see https://github.com/ClickHouse-Extras.

| Name | Default value | Description | Comment |
|------|---------------|-------------|---------|
| [`ENABLE_AVX`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L19) | `0` | Use AVX instructions on x86_64 | |
| [`ENABLE_AVX2`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L20) | `0` | Use AVX2 instructions on x86_64 | |
| [`ENABLE_AVX2_FOR_SPEC_OP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L23) | `0` | Use avx2 instructions for specific operations on x86_64 | |
| [`ENABLE_AVX512`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L21) | `0` | Use AVX512 instructions on x86_64 | |
| [`ENABLE_AVX512_FOR_SPEC_OP`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L24) | `0` | Use avx512 instructions for specific operations on x86_64 | |
| [`ENABLE_BMI`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L22) | `0` | Use BMI instructions on x86_64 | |
| [`ENABLE_CCACHE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/ccache.cmake#L22) | `ENABLE_CCACHE_BY_DEFAULT` | Speedup re-compilations using ccache (external tool) | https://ccache.dev/ |
| [`ENABLE_CLANG_TIDY`](https://github.com/clickhouse/clickhouse/blob/master/cmake/clang_tidy.cmake#L2) | `OFF` | Use clang-tidy static analyzer | https://clang.llvm.org/extra/clang-tidy/ |
| [`ENABLE_PCLMULQDQ`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L17) | `1` | Use pclmulqdq instructions on x86_64 | |
| [`ENABLE_POPCNT`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L18) | `1` | Use popcnt instructions on x86_64 | |
| [`ENABLE_SSE41`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L15) | `1` | Use SSE4.1 instructions on x86_64 | |
| [`ENABLE_SSE42`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L16) | `1` | Use SSE4.2 instructions on x86_64 | |
| [`ENABLE_SSSE3`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L14) | `1` | Use SSSE3 instructions on x86_64 | |

## Other flags

| Name | Default value | Description | Comment |
|------|---------------|-------------|---------|
| [`ADD_GDB_INDEX_FOR_GOLD`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L226) | `OFF` | Add .gdb-index to resulting binaries for gold linker. | Ignored if `lld` is used |
| [`ARCH_NATIVE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/cpu_features.cmake#L26) | `0` | Add -march=native compiler flag. This makes your binaries non-portable but more performant code may be generated. This option overrides ENABLE_* options for specific instruction set. Highly not recommended to use. | |
| [`BUILD_STANDALONE_KEEPER`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L253) | `OFF` | Build keeper as small standalone binary | |
| [`CLICKHOUSE_SPLIT_BINARY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L113) | `OFF` | Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled | |
| [`COMPILER_PIPE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L281) | `ON` | -pipe compiler option | Less `/tmp` usage, more RAM usage. |
| [`ENABLE_BUILD_PATH_MAPPING`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L299) | `ON` | Remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE() to generate reproducible builds. See https://reproducible-builds.org/docs/build-path | |
| [`ENABLE_CHECK_HEAVY_BUILDS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L81) | `OFF` | Don't allow C++ translation units to compile too long or to take too much memory while compiling. | Take care to add prlimit in command line before ccache, or else ccache thinks that prlimit is the compiler and clang++ is its input file, and refuses to work with multiple inputs, e.g. in the ccache log: [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 ... -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp. Another way would be to use the --ccache-skip option before clang++-11 to make ccache ignore it. |
| [`ENABLE_COLORED_BUILD`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L160) | `ON` | Enable colored diagnostics in build log. | |
| [`ENABLE_EXAMPLES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L201) | `OFF` | Build all example programs in 'examples' subdirectories | |
| [`ENABLE_FUZZING`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L129) | `OFF` | Fuzzy testing using libfuzzer | |
| [`ENABLE_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L413) | `ON` | Enable all external libraries by default | Turns on all external libs like s3, kafka, ODBC, ... |
| [`ENABLE_MULTITARGET_CODE`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L102) | `ON` | Enable platform-dependent code | ClickHouse developers may use platform-dependent code under some macro (e.g. `ifdef ENABLE_MULTITARGET`). If turned ON, this option defines such macro. See `src/Functions/TargetSpecific.h` |
| [`ENABLE_TESTS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L200) | `ON` | Provide unit_test_dbms target with Google.Test unit tests | If turned `ON`, assumes the user has either the system GTest library or the bundled one. |
| [`ENABLE_THINLTO`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L386) | `ON` | Clang-specific link time optimization | https://clang.llvm.org/docs/ThinLTO.html Applies to clang only. Disabled when building with tests or sanitizers. |
| [`FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L32) | `ON` | Stop/Fail CMake configuration if some ENABLE_XXX option is defined (either ON or OFF) but is not possible to satisfy | If turned off: e.g. when ENABLE_FOO is ON, but the FOO tool was not found, CMake will continue. |
| [`GLIBC_COMPATIBILITY`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L205) | `ON` | Enable compatibility with older glibc libraries. | Only for Linux, x86_64 or aarch64. |
| [`INSTALL_STRIPPED_BINARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L270) | `OFF` | Build stripped binaries with debug info in separate directory | |
| [`LINKER_NAME`](https://github.com/clickhouse/clickhouse/blob/master/cmake/tools.cmake#L58) | `OFF` | Linker name or full path | Example values: `lld-10`, `gold`. |
| [`PARALLEL_COMPILE_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L10) | `""` | Maximum number of concurrent compilation jobs | 1 if not set |
| [`PARALLEL_LINK_JOBS`](https://github.com/clickhouse/clickhouse/blob/master/cmake/limit_jobs.cmake#L13) | `""` | Maximum number of concurrent link jobs | 1 if not set |
| [`SANITIZE`](https://github.com/clickhouse/clickhouse/blob/master/cmake/sanitize.cmake#L7) | `""` | Enable one of the code sanitizers | Possible values: `address` (ASan), `memory` (MSan), `thread` (TSan), `undefined` (UBSan), `""` (no sanitizing) |
| [`SPLIT_SHARED_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L111) | `OFF` | Keep all internal libraries as separate .so files | DEVELOPER ONLY. Faster linking if turned on. |
| [`STRIP_DEBUG_SYMBOLS_FUNCTIONS`](https://github.com/clickhouse/clickhouse/blob/master/src/Functions/CMakeLists.txt#L48) | `STRIP_DSF_DEFAULT` | Do not generate debugger info for ClickHouse functions | Provides faster linking and lower binary size. Tradeoff is the inability to debug some source files with e.g. gdb (empty stack frames and no local variables). |
| [`USE_DEBUG_HELPERS`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L252) | `USE_DEBUG_HELPERS` | Enable debug helpers | |
| [`USE_STATIC_LIBRARIES`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L106) | `ON` | Disable to use shared libraries | |
| [`USE_UNWIND`](https://github.com/clickhouse/clickhouse/blob/master/cmake/unwind.cmake#L1) | `ENABLE_LIBRARIES` | Enable libunwind (better stacktraces) | |
| [`WERROR`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L417) | `OFF` | Enable -Werror compiler option | Using system libs can cause a lot of warnings in includes (on macro expansion). |
| [`WITH_COVERAGE`](https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L344) | `OFF` | Profile the resulting binary/binaries | Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc |

## Developer's guide for adding new CMake options

#### Don't be obvious. Be informative.

Bad:

```
option (ENABLE_TESTS "Enables testing" OFF)
```

This description is quite useless as it neither gives the viewer any additional information nor explains the option purpose.

Better:

```
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
```

If the option's purpose can't be guessed by its name, or the purpose guess may be misleading, or the option has some
pre-conditions, leave a comment above the option() line and explain what it does.
The best way would be linking the docs page (if it exists).
The comment is parsed into a separate column (see above).

Even better:

```
# implies ${TESTS_ARE_ENABLED}
# see tests/CMakeLists.txt for implementation detail.
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.test unit tests" OFF)
```

#### If the option's state could produce unwanted (or unusual) result, explicitly warn the user.

Suppose you have an option that may strip debug symbols from the ClickHouse part.
This can speed up the linking process, but produces a binary that cannot be debugged.
In that case, prefer explicitly raising a warning telling the developer that they may be doing something wrong.
Also, such options should be disabled by default where that applies.

Bad:

```
option(STRIP_DEBUG_SYMBOLS_FUNCTIONS
    "Do not generate debugger info for ClickHouse functions."
    ${STRIP_DSF_DEFAULT})

if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
    target_compile_options(clickhouse_functions PRIVATE "-g0")
endif()
```

Better:

```
# Provides faster linking and lower binary size.
# Tradeoff is the inability to debug some source files with e.g. gdb
# (empty stack frames and no local variables).
option(STRIP_DEBUG_SYMBOLS_FUNCTIONS
    "Do not generate debugger info for ClickHouse functions."
    ${STRIP_DSF_DEFAULT})

if (STRIP_DEBUG_SYMBOLS_FUNCTIONS)
    message(WARNING "Not generating debugger info for ClickHouse functions")
    target_compile_options(clickhouse_functions PRIVATE "-g0")
endif()
```

#### In the option's description, explain WHAT the option does rather than WHY it does something.

The WHY explanation should be placed in the comment. You may find that the option's name is self-descriptive.

Bad:

```
option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON)
```

Better:

```
# Only applicable for clang.
# Turned off when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimisation" ON)
```

#### Don't assume other developers know as much as you do.

In ClickHouse, there are many tools used that an ordinary developer may not know. If you are in doubt, give a link to
the tool's docs. It won't take much of your time.

Bad:

```
option(ENABLE_THINLTO "Enable Thin LTO. Only applicable for clang. It's also suppressed when building with tests or sanitizers." ON)
```

Better (combined with the above hint):

```
# https://clang.llvm.org/docs/ThinLTO.html
# Only applicable for clang.
# Turned off when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimisation" ON)
```

Another example, bad:

```
option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF)
```

Better:

```
# https://github.com/include-what-you-use/include-what-you-use
option (USE_INCLUDE_WHAT_YOU_USE "Reduce unneeded #include s (external tool)" OFF)
```

#### Prefer consistent default values.

CMake allows you to pass a plethora of values representing boolean `true/false`, e.g. `1, ON, YES, ...`.
Prefer the `ON/OFF` values, if possible.
@@ -136,4 +136,3 @@ DESCRIBE TABLE test_database.test_table;
└────────┴───────────────────┘
```

-[Original article](https://clickhouse.com/docs/en/database-engines/postgresql/) <!--hide-->
@@ -43,4 +43,3 @@ The `TinyLog` engine is the simplest in the family and provides the poorest func

The `Log` and `StripeLog` engines support parallel data reading. When reading data, ClickHouse uses multiple threads. Each thread processes a separate data block. The `Log` engine uses a separate file for each column of the table. `StripeLog` stores all the data in one file. As a result, the `StripeLog` engine uses fewer file descriptors, but the `Log` engine provides higher efficiency when reading data.

-[Original article](https://clickhouse.com/docs/en/operations/table_engines/log_family/) <!--hide-->
@ -68,40 +68,42 @@ For a description of parameters, see the [CREATE query description](../../../sql

`ORDER BY` — The sorting key.

A tuple of column names or arbitrary expressions. Example: `ORDER BY (CounterID, EventDate)`.

ClickHouse uses the sorting key as a primary key if the primary key is not defined explicitly by the `PRIMARY KEY` clause.

Use the `ORDER BY tuple()` syntax, if you do not need sorting. See [Selecting the Primary Key](#selecting-the-primary-key).

#### PARTITION BY

`PARTITION BY` — The [partitioning key](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases, you don't need a partition key, and when you do, you rarely need one more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names; instead, make the client identifier or name the first column in the ORDER BY expression.

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](../../../sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
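For instance, a minimal sketch of a table following this guidance; the table and column names (`hits`, `EventDate`, `CounterID`, `UserID`) are hypothetical:

```sql
CREATE TABLE hits
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt64
)
ENGINE = MergeTree
-- Month-level partitions; partition names look like 202406.
PARTITION BY toYYYYMM(EventDate)
-- The client identifier goes first in the sorting key, not in the partition key.
ORDER BY (CounterID, EventDate)
```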
#### PRIMARY KEY

`PRIMARY KEY` — The primary key if it [differs from the sorting key](#choosing-a-primary-key-that-differs-from-the-sorting-key). Optional.

By default the primary key is the same as the sorting key (which is specified by the `ORDER BY` clause). Thus in most cases it is unnecessary to specify a separate `PRIMARY KEY` clause.

#### SAMPLE BY

`SAMPLE BY` — An expression for sampling. Optional.

If a sampling expression is used, the primary key must contain it. The result of a sampling expression must be an unsigned integer. Example: `SAMPLE BY intHash32(UserID) ORDER BY (CounterID, EventDate, intHash32(UserID))`.

#### TTL

`TTL` — A list of rules specifying the storage duration of rows and defining the logic of automatic parts movement [between disks and volumes](#table_engine-mergetree-multiple-volumes). Optional.

The expression must have one `Date` or `DateTime` column as a result. Example:

-`TTL date + INTERVAL 1 DAY`
+```
+TTL date + INTERVAL 1 DAY
+```

The type of the rule `DELETE|TO DISK 'xxx'|TO VOLUME 'xxx'|GROUP BY` specifies an action to be done with the part if the expression is satisfied (reaches the current time): removal of expired rows, moving a part (if the expression is satisfied for all rows in a part) to the specified disk (`TO DISK 'xxx'`) or volume (`TO VOLUME 'xxx'`), or aggregating values in expired rows. The default rule type is removal (`DELETE`). A list of multiple rules can be specified, but there should be no more than one `DELETE` rule.

For more details, see [TTL for columns and tables](#table_engine-mergetree-ttl).
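As a sketch of combining several rules (the volume name `slow_disks` is hypothetical and assumes a matching storage policy exists):

```sql
TTL date + INTERVAL 1 WEEK TO VOLUME 'slow_disks',
    date + INTERVAL 1 MONTH DELETE
```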
### SETTINGS

Additional parameters that control the behavior of the `MergeTree` (optional):

@ -129,7 +131,6 @@ Additional parameters that control the behavior of the `MergeTree` (optional):
#### min_merge_bytes_to_use_direct_io

`min_merge_bytes_to_use_direct_io` — The minimum data volume for a merge operation that is required for using direct I/O access to the storage disk. When merging data parts, ClickHouse calculates the total storage volume of all the data to be merged. If the volume exceeds `min_merge_bytes_to_use_direct_io` bytes, ClickHouse reads and writes the data to the storage disk using the direct I/O interface (the `O_DIRECT` option). If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled. Default value: `10 * 1024 * 1024 * 1024` bytes.
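A minimal sketch of passing this setting when creating a table; the table and column names are hypothetical:

```sql
CREATE TABLE events
(
    id UInt64
)
ENGINE = MergeTree
ORDER BY id
-- Setting the threshold to 0 disables direct I/O for merges on this table.
SETTINGS min_merge_bytes_to_use_direct_io = 0
```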
-<a name="mergetree_setting-merge_with_ttl_timeout"></a>

#### merge_with_ttl_timeout
@ -305,15 +306,29 @@ For `SELECT` queries, ClickHouse analyzes whether an index can be used. An index

Thus, it is possible to quickly run queries on one or many ranges of the primary key. In this example, queries will be fast when run for a specific tracking tag, for a specific tag and date range, for a specific tag and date, for multiple tags with a date range, and so on.

Let’s look at the engine configured as follows:

-ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate) SETTINGS index_granularity=8192
+```sql
+ENGINE MergeTree()
+PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate)
+SETTINGS index_granularity=8192
+```
In this case, in queries:

``` sql
-SELECT count() FROM table WHERE EventDate = toDate(now()) AND CounterID = 34
-SELECT count() FROM table WHERE EventDate = toDate(now()) AND (CounterID = 34 OR CounterID = 42)
-SELECT count() FROM table WHERE ((EventDate >= toDate('2014-01-01') AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01')) AND CounterID IN (101500, 731962, 160656) AND (CounterID = 101500 OR EventDate != toDate('2014-05-01'))
+SELECT count() FROM table
+WHERE EventDate = toDate(now())
+AND CounterID = 34
+
+SELECT count() FROM table
+WHERE EventDate = toDate(now())
+AND (CounterID = 34 OR CounterID = 42)
+
+SELECT count() FROM table
+WHERE ((EventDate >= toDate('2014-01-01')
+AND EventDate <= toDate('2014-01-31')) OR EventDate = toDate('2014-05-01'))
+AND CounterID IN (101500, 731962, 160656)
+AND (CounterID = 101500 OR EventDate != toDate('2014-05-01'))
```

ClickHouse will use the primary key index to trim improper data and the monthly partitioning key to trim partitions that are in improper date ranges.
@ -376,36 +391,36 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234

#### `minmax`

Stores the extremes of the specified expression (if the expression is a `tuple`, it stores the extremes for each element of the `tuple`), and uses the stored info for skipping blocks of data, like the primary key.

#### `set(max_rows)`

Stores unique values of the specified expression (no more than `max_rows` rows; `max_rows=0` means “no limits”). Uses the values to check if the `WHERE` expression is not satisfiable on a block of data.
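A minimal sketch of declaring these two index types in a table definition; the table, column, and index names are hypothetical:

```sql
CREATE TABLE example_skip_indexes
(
    u64 UInt64,
    s String,
    -- Skips granule ranges whose [min, max] for u64 cannot match the filter.
    INDEX idx_minmax u64 TYPE minmax GRANULARITY 4,
    -- Stores up to 100 distinct values of s per indexed block.
    INDEX idx_set s TYPE set(100) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY u64
```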
#### `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`

Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) that contains all ngrams from a block of data. Works only with the datatypes: [String](../../../sql-reference/data-types/string.md), [FixedString](../../../sql-reference/data-types/fixedstring.md) and [Map](../../../sql-reference/data-types/map.md). Can be used for optimization of `EQUALS`, `LIKE` and `IN` expressions.

- `n` — ngram size,
- `size_of_bloom_filter_in_bytes` — Bloom filter size in bytes (you can use large values here, for example, 256 or 512, because it can be compressed well).
- `number_of_hash_functions` — The number of hash functions used in the Bloom filter.
- `random_seed` — The seed for Bloom filter hash functions.

#### `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, random_seed)`

The same as `ngrambf_v1`, but stores tokens instead of ngrams. Tokens are sequences separated by non-alphanumeric characters.

#### `bloom_filter([false_positive])` — Stores a [Bloom filter](https://en.wikipedia.org/wiki/Bloom_filter) for the specified columns.

The optional `false_positive` parameter is the probability of receiving a false positive response from the filter. Possible values: (0, 1). Default value: 0.025.

Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`, `Array`, `LowCardinality`, `Nullable`, `UUID`, `Map`.

For the `Map` data type, the client can specify whether the index should be created for keys or values, using the [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.

The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).

Example of index creation for the `Map` data type:

```
INDEX map_key_index mapKeys(map_column) TYPE bloom_filter GRANULARITY 1
```
@ -86,4 +86,3 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
- Indices
- Replication

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/file/) <!--hide-->
@ -151,4 +151,3 @@ ALTER TABLE id_val_join DELETE WHERE id = 3;
└────┴─────┘
```

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/join/) <!--hide-->
@ -86,4 +86,3 @@ SELECT * FROM WatchLog;
- [Virtual columns](../../../engines/table-engines/special/index.md#table_engines-virtual_columns)
- [merge](../../../sql-reference/table-functions/merge.md) table function

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/merge/) <!--hide-->
@ -10,6 +10,3 @@ When writing to a `Null` table, data is ignored. When reading from a `Null` tabl
:::note
If you are wondering why this is useful, note that you can create a materialized view on a `Null` table. So the data written to the table will end up affecting the view, but original raw data will still be discarded.
:::
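A minimal sketch of that pattern; the table, view, and column names are hypothetical:

```sql
CREATE TABLE raw_events (s String) ENGINE = Null;

-- The view processes every block written to raw_events,
-- while raw_events itself stores nothing.
CREATE MATERIALIZED VIEW event_lengths
ENGINE = MergeTree ORDER BY len
AS SELECT length(s) AS len FROM raw_events;

INSERT INTO raw_events VALUES ('hello'); -- discarded, but event_lengths gets a row
```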

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/null/) <!--hide-->
@ -20,4 +20,3 @@ When creating a table, the following settings are applied:

- [persistent](../../../operations/settings/settings.md#persistent)

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/set/) <!--hide-->
@ -89,4 +89,3 @@ SELECT * FROM url_engine_table
- Indexes.
- Replication.

[Original article](https://clickhouse.com/docs/en/operations/table_engines/special/url/) <!--hide-->
@ -51,7 +51,6 @@ ClickHouse Inc does **not** maintain the libraries listed below and hasn’t don
- [clickhouse-rs](https://github.com/suharev7/clickhouse-rs)
- [Klickhouse](https://github.com/Protryon/klickhouse)
- R
-    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickHouse](https://github.com/IMSMWU/RClickHouse)
- Java
    - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
@ -13,4 +13,3 @@ The following external authenticators and directories are supported:
- Kerberos [Authenticator](./kerberos.md#external-authenticators-kerberos)
- [SSL X.509 authentication](./ssl-x509.md#ssl-external-authentication)

[Original article](https://clickhouse.com/docs/en/operations/external-authenticators/index/) <!--hide-->
@ -61,4 +61,3 @@ exception_code: ZOK
2 rows in set. Elapsed: 0.025 sec.
```

[Original article](https://clickhouse.com/docs/en/operations/system_tables/distributed_ddl_queue) <!--hide-->
@ -47,4 +47,3 @@ last_exception:

- [Distributed table engine](../../engines/table-engines/special/distributed.md)

[Original article](https://clickhouse.com/docs/en/operations/system_tables/distribution_queue) <!--hide-->
@ -50,4 +50,3 @@ attribute.values: []

- [OpenTelemetry](../../operations/opentelemetry.md)

[Original article](https://clickhouse.com/docs/en/operations/system_tables/opentelemetry_span_log) <!--hide-->
@ -145,4 +145,3 @@ column_marks_bytes: 48

- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)

[Original article](https://clickhouse.com/docs/en/operations/system_tables/parts_columns) <!--hide-->
@ -88,4 +88,3 @@ last_postpone_time: 1970-01-01 03:00:00

- [Managing ReplicatedMergeTree Tables](../../sql-reference/statements/system.md#query-language-system-replicated)

[Original article](https://clickhouse.com/docs/en/operations/system_tables/replication_queue) <!--hide-->
@ -128,7 +128,8 @@ You should never use manually written scripts to transfer data between different

If you want to divide an existing ZooKeeper cluster into two, the correct way is to increase the number of its replicas and then reconfigure it as two independent clusters.

-You can run ClickHouse Keeper on the same server as ClickHouse, but do not run ZooKeeper on the same servers as ClickHouse. Because ZooKeeper is very sensitive for latency and ClickHouse may utilize all available system resources.
+You can run ClickHouse Keeper on the same server as ClickHouse in test environments, or in environments with a low ingestion rate.
+For production environments, we suggest using separate servers for ClickHouse and ZooKeeper/Keeper, or placing the ClickHouse files and Keeper files on separate disks, because ZooKeeper/Keeper are very sensitive to disk latency and ClickHouse may utilize all available system resources.

You can have ZooKeeper observers in an ensemble but ClickHouse servers should not interact with observers.
@ -66,5 +66,3 @@ Result:
└──────────────────────────────────────────────────────────────────────────────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/meanZTest/) <!--hide-->
@ -69,4 +69,3 @@ Result:
- [Welch's t-test](https://en.wikipedia.org/wiki/Welch%27s_t-test)
- [studentTTest function](studentttest.md#studentttest)

[Original article](https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/welchTTest/) <!--hide-->
@ -27,4 +27,3 @@ You can use domains anywhere corresponding base type can be used, for example:
- Can’t implicitly convert string values into domain values when inserting data from another column or table.
- Domain adds no constraints on stored values.

[Original article](https://clickhouse.com/docs/en/data_types/domains/) <!--hide-->
@ -104,4 +104,3 @@ Result:
└─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘
```

[Original article](https://clickhouse.com/docs/en/data-types/geo/) <!--hide-->
@ -108,4 +108,3 @@ Result:
- [map()](../../sql-reference/functions/tuple-map-functions.md#function-map) function
- [CAST()](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function

[Original article](https://clickhouse.com/docs/en/data-types/map/) <!--hide-->
@ -39,4 +39,3 @@ Values of the `SimpleAggregateFunction(func, Type)` look and stored the same way
CREATE TABLE simple (id UInt64, val SimpleAggregateFunction(sum, Double)) ENGINE=AggregatingMergeTree ORDER BY id;
```

[Original article](https://clickhouse.com/docs/en/data_types/simpleaggregatefunction/) <!--hide-->
@ -355,4 +355,3 @@ Result:
└───────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/encryption_functions/) <!--hide-->
@ -20,57 +20,57 @@ sidebar_position: 62

## H3 Indexes Functions

-- [h3IsValid](./h3#h3IsValid)
-- [h3GetResolution](./h3#h3GetResolution)
-- [h3EdgeAngle](./h3#h3EdgeAngle)
-- [h3EdgeLengthM](./h3#h3EdgeLengthM)
-- [h3EdgeLengthKm] (./h3#h3EdgeLengthKm)
-- [geoToH3](./h3#geoToH3)
-- [h3ToGeo](./h3#h3ToGeo)
-- [h3ToGeoBoundary](./h3#h3ToGeoBoundary)
-- [h3kRing](./h3#h3kRing)
-- [h3GetBaseCell](./h3#h3GetBaseCell)
-- [h3HexAreaM2](./h3#h3HexAreaM2)
-- [h3HexAreaKm2](./h3#h3HexAreaKm2)
-- [h3IndexesAreNeighbors](./h3#h3IndexesAreNeighbors)
-- [h3ToChildren](./h3#h3ToChildren)
-- [h3ToParent](./h3#h3ToParent)
-- [h3ToString](./h3#h3ToString)
-- [stringToH3](./h3#stringToH3)
-- [h3GetResolution](./h3#h3GetResolution)
-- [h3IsResClassIII](./h3#h3IsResClassIII)
-- [h3IsPentagon](./h3#h3IsPentagon)
-- [h3GetFaces](./h3#h3GetFaces)
-- [h3CellAreaM2](./h3#h3CellAreaM2)
-- [h3CellAreaRads2](./h3#h3CellAreaRads2)
-- [h3ToCenterChild](./h3#h3ToCenterChild)
-- [h3ExactEdgeLengthM](./h3#h3ExactEdgeLengthM)
-- [h3ExactEdgeLengthKm](./h3#h3ExactEdgeLengthKm)
-- [h3ExactEdgeLengthRads](./h3#h3ExactEdgeLengthRads)
-- [h3NumHexagons](./h3#h3NumHexagons)
-- [h3Line](./h3#h3Line)
-- [h3Distance](./h3#h3Distance)
-- [h3HexRing](./h3#h3HexRing)
-- [h3GetUnidirectionalEdge](./h3#h3GetUnidirectionalEdge)
-- [h3UnidirectionalEdgeIsValid](./h3#h3UnidirectionalEdgeIsValid)
-- [h3GetOriginIndexFromUnidirectionalEdge](./h3#h3GetOriginIndexFromUnidirectionalEdge)
-- [h3GetDestinationIndexFromUnidirectionalEdge](./h3#h3GetDestinationIndexFromUnidirectionalEdge)
-- [h3GetIndexesFromUnidirectionalEdge](./h3#h3GetIndexesFromUnidirectionalEdge)
-- [h3GetUnidirectionalEdgesFromHexagon](./h3#h3GetUnidirectionalEdgesFromHexagon)
-- [h3GetUnidirectionalEdgeBoundary](./h3#h3GetUnidirectionalEdgeBoundary)
+- [h3IsValid](./h3.md#h3IsValid)
+- [h3GetResolution](./h3.md#h3GetResolution)
+- [h3EdgeAngle](./h3.md#h3EdgeAngle)
+- [h3EdgeLengthM](./h3.md#h3EdgeLengthM)
+- [h3EdgeLengthKm](./h3.md#h3EdgeLengthKm)
+- [geoToH3](./h3.md#geoToH3)
+- [h3ToGeo](./h3.md#h3ToGeo)
+- [h3ToGeoBoundary](./h3.md#h3ToGeoBoundary)
+- [h3kRing](./h3.md#h3kRing)
+- [h3GetBaseCell](./h3.md#h3GetBaseCell)
+- [h3HexAreaM2](./h3.md#h3HexAreaM2)
+- [h3HexAreaKm2](./h3.md#h3HexAreaKm2)
+- [h3IndexesAreNeighbors](./h3.md#h3IndexesAreNeighbors)
+- [h3ToChildren](./h3.md#h3ToChildren)
+- [h3ToParent](./h3.md#h3ToParent)
+- [h3ToString](./h3.md#h3ToString)
+- [stringToH3](./h3.md#stringToH3)
+- [h3GetResolution](./h3.md#h3GetResolution)
+- [h3IsResClassIII](./h3.md#h3IsResClassIII)
+- [h3IsPentagon](./h3.md#h3IsPentagon)
+- [h3GetFaces](./h3.md#h3GetFaces)
+- [h3CellAreaM2](./h3.md#h3CellAreaM2)
+- [h3CellAreaRads2](./h3.md#h3CellAreaRads2)
+- [h3ToCenterChild](./h3.md#h3ToCenterChild)
+- [h3ExactEdgeLengthM](./h3.md#h3ExactEdgeLengthM)
+- [h3ExactEdgeLengthKm](./h3.md#h3ExactEdgeLengthKm)
+- [h3ExactEdgeLengthRads](./h3.md#h3ExactEdgeLengthRads)
+- [h3NumHexagons](./h3.md#h3NumHexagons)
+- [h3Line](./h3.md#h3Line)
+- [h3Distance](./h3.md#h3Distance)
+- [h3HexRing](./h3.md#h3HexRing)
+- [h3GetUnidirectionalEdge](./h3.md#h3GetUnidirectionalEdge)
+- [h3UnidirectionalEdgeIsValid](./h3.md#h3UnidirectionalEdgeIsValid)
+- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3GetOriginIndexFromUnidirectionalEdge)
+- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3GetDestinationIndexFromUnidirectionalEdge)
+- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3GetIndexesFromUnidirectionalEdge)
+- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3GetUnidirectionalEdgesFromHexagon)
+- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3GetUnidirectionalEdgeBoundary)

## S2 Index Functions

-- [geoToS2](./s2#geoToS2)
-- [s2ToGeo](./s2#s2ToGeo)
-- [s2GetNeighbors](./s2#s2GetNeighbors)
-- [s2CellsIntersect](./s2#s2CellsIntersect)
-- [s2CapContains](./s2#s2CapContains)
-- [s2CapUnion](./s2#s2CapUnion)
-- [s2RectAdd](./s2#s2RectAdd)
-- [s2RectContains](./s2#s2RectContains)
-- [s2RectUinion](./s2#s2RectUinion)
-- [s2RectIntersection](./s2#s2RectIntersection)
+- [geoToS2](./s2.md#geoToS2)
+- [s2ToGeo](./s2.md#s2ToGeo)
+- [s2GetNeighbors](./s2.md#s2GetNeighbors)
+- [s2CellsIntersect](./s2.md#s2CellsIntersect)
+- [s2CapContains](./s2.md#s2CapContains)
+- [s2CapUnion](./s2.md#s2CapUnion)
+- [s2RectAdd](./s2.md#s2RectAdd)
+- [s2RectContains](./s2.md#s2RectContains)
+- [s2RectUinion](./s2.md#s2RectUinion)
+- [s2RectIntersection](./s2.md#s2RectIntersection)

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/geo/) <!--hide-->
@ -312,11 +312,11 @@ The aggregation can be performed more effectively, if a table is sorted by some
### GROUP BY in External Memory

You can enable dumping temporary data to the disk to restrict memory usage during `GROUP BY`.
-The [max_bytes_before_external_group_by](../../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.
+The [max_bytes_before_external_group_by](../../../operations/settings/query-complexity.md#settings-max_bytes_before_external_group_by) setting determines the threshold RAM consumption for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.

When using `max_bytes_before_external_group_by`, we recommend that you set `max_memory_usage` about twice as high. This is necessary because there are two stages to aggregation: reading the data and forming intermediate data (1) and merging the intermediate data (2). Dumping data to the file system can only occur during stage 1. If the temporary data wasn’t dumped, then stage 2 might require up to the same amount of memory as in stage 1.

-For example, if [max_memory_usage](../../../operations/settings/settings.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000, and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one dump of temporary data), maximum consumption of RAM is only slightly more than `max_bytes_before_external_group_by`.
+For example, if [max_memory_usage](../../../operations/settings/query-complexity.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000, and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one dump of temporary data), maximum consumption of RAM is only slightly more than `max_bytes_before_external_group_by`.
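As a sketch of this recommendation in a query (the table and column names are hypothetical):

```sql
SELECT key, count()
FROM hits
GROUP BY key
SETTINGS
    -- Spill GROUP BY state to disk once it exceeds roughly 10 GB of RAM.
    max_bytes_before_external_group_by = 10000000000,
    -- About twice the spill threshold, as recommended above.
    max_memory_usage = 20000000000
```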
With distributed query processing, external aggregation is performed on remote servers. In order for the requester server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1.
@ -111,4 +111,3 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
- [The ‘MySQL’ table engine](../../engines/table-engines/integrations/mysql.md)
- [Using MySQL as a source of external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)

[Original article](https://clickhouse.com/docs/en/sql-reference/table_functions/mysql/) <!--hide-->
@ -45,7 +45,6 @@ sidebar_label: "Client libraries from third-party developers
- [clickhouse-rs](https://github.com/suharev7/clickhouse-rs)
- [Klickhouse](https://github.com/Protryon/klickhouse)
- R
-    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickhouse](https://github.com/IMSMWU/RClickhouse)
- Java
    - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
@ -264,10 +264,10 @@ GROUP BY computes for each encountered
### Grouping in External Memory {#select-group-by-in-external-memory}

You can enable spilling temporary data to disk to limit memory consumption during `GROUP BY`.
-The [max_bytes_before_external_group_by](../../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the RAM consumption threshold at which `GROUP BY` temporary data is flushed to the file system. If it is 0 (the default), spilling is disabled.
+The [max_bytes_before_external_group_by](../../../operations/settings/query-complexity.md#settings-max_bytes_before_external_group_by) setting determines the RAM consumption threshold at which `GROUP BY` temporary data is flushed to the file system. If it is 0 (the default), spilling is disabled.

When using `max_bytes_before_external_group_by`, we recommend setting `max_memory_usage` about twice as high. This is necessary because aggregation runs in two stages: reading and forming intermediate data (1), and merging the intermediate data (2). Data can be flushed to the file system only during stage 1. If no temporary data was flushed, stage 2 may consume as much memory as stage 1.

-For example, if [max_memory_usage](../../../operations/settings/settings.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000 and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one flush of temporary data to the file system), the peak RAM consumption is only slightly higher than `max_bytes_before_external_group_by`.
+For example, if [max_memory_usage](../../../operations/settings/query-complexity.md#settings_max_memory_usage) was set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000 and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one flush of temporary data to the file system), the peak RAM consumption is only slightly higher than `max_bytes_before_external_group_by`.

With distributed query processing, external aggregation is performed on remote servers. For the initiator server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1.
@ -46,7 +46,6 @@ Yandex does **not** maintain the libraries listed below and has not done any extensive testing
- [clickhouse-rs](https://github.com/suharev7/clickhouse-rs)
- [Klickhouse](https://github.com/Protryon/klickhouse)
- R
-    - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r)
    - [RClickHouse](https://github.com/IMSMWU/RClickHouse)
- Java
    - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java)
@ -116,11 +116,11 @@ GROUP BY domain
### Grouping in External Memory {#select-group-by-in-external-memory}

You can enable dumping temporary data to disk to limit memory usage during `GROUP BY`.
-The [max_bytes_before_external_group_by](../../../operations/settings/settings.md#settings-max_bytes_before_external_group_by) setting determines the RAM consumption threshold for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.
+The [max_bytes_before_external_group_by](../../../operations/settings/query-complexity.md#settings-max_bytes_before_external_group_by) setting determines the RAM consumption threshold for dumping `GROUP BY` temporary data to the file system. If set to 0 (the default), it is disabled.

When using `max_bytes_before_external_group_by`, we recommend setting `max_memory_usage` about twice as high. This is necessary because aggregation has two stages: reading the data and forming intermediate data (1), and merging the intermediate data (2). Dumping data to the file system can only happen during stage 1. If the temporary data was not dumped, stage 2 may require as much memory as stage 1.

-For example, if [max_memory_usage](../../../operations/settings/settings.md#settings_max_memory_usage) is set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000 and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one temporary data dump), the maximum RAM consumption is only slightly higher than `max_bytes_before_external_group_by`.
+For example, if [max_memory_usage](../../../operations/settings/query-complexity.md#settings_max_memory_usage) is set to 10000000000 and you want to use external aggregation, it makes sense to set `max_bytes_before_external_group_by` to 10000000000 and `max_memory_usage` to 20000000000. When external aggregation is triggered (if there was at least one temporary data dump), the maximum RAM consumption is only slightly higher than `max_bytes_before_external_group_by`.

With distributed query processing, external aggregation is performed on remote servers. For the requester server to use only a small amount of RAM, set `distributed_aggregation_memory_efficient` to 1.
@ -2,7 +2,7 @@ if (USE_CLANG_TIDY)
    set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()

-include(${ClickHouse_SOURCE_DIR}/cmake/strip_binary.cmake)
+include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)

# The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),
# each of them may be built and linked as a separate library.

@ -18,6 +18,12 @@ option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_A
option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)"
    ${ENABLE_CLICKHOUSE_ALL})

+if (CLICKHOUSE_SPLIT_BINARY OR NOT ENABLE_UTILS)
+    option (ENABLE_CLICKHOUSE_SELF_EXTRACTING "Self-extracting executable" OFF)
+else ()
+    option (ENABLE_CLICKHOUSE_SELF_EXTRACTING "Self-extracting executable" ON)
+endif ()

# https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/
option (ENABLE_CLICKHOUSE_LOCAL "Local files fast processing mode" ${ENABLE_CLICKHOUSE_ALL})
@ -101,6 +107,12 @@ else()
    message(STATUS "Local mode: OFF")
endif()

+if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
+    message(STATUS "Self-extracting executable: ON")
+else()
+    message(STATUS "Self-extracting executable: OFF")
+endif()

if (ENABLE_CLICKHOUSE_BENCHMARK)
    message(STATUS "Benchmark mode: ON")
else()
@ -266,6 +278,10 @@ if (ENABLE_CLICKHOUSE_LIBRARY_BRIDGE)
    add_subdirectory (library-bridge)
endif ()

+if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
+    add_subdirectory (self-extracting)
+endif ()

if (CLICKHOUSE_ONE_SHARED)
    add_library(clickhouse-lib SHARED
        ${CLICKHOUSE_SERVER_SOURCES}
@ -511,10 +527,10 @@ else ()
    add_custom_command(TARGET clickhouse POST_BUILD COMMAND ./clickhouse hash-binary > hash && ${OBJCOPY_PATH} --add-section .clickhouse.hash=hash clickhouse COMMENT "Adding section '.clickhouse.hash' to clickhouse binary" VERBATIM)
endif()

-if (INSTALL_STRIPPED_BINARIES)
-    clickhouse_strip_binary(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${STRIPPED_BINARIES_OUTPUT} BINARY_PATH clickhouse)
+if (SPLIT_DEBUG_SYMBOLS)
+    clickhouse_split_debug_symbols(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH clickhouse)
else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${STRIPPED_BINARIES_OUTPUT})
+    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SPLITTED_DEBUG_SYMBOLS_DIR})
    install (TARGETS clickhouse RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
endif()
@ -131,10 +131,10 @@ if (BUILD_STANDALONE_KEEPER)
    add_dependencies(clickhouse-keeper clickhouse_keeper_configs)
    set_target_properties(clickhouse-keeper PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../)

-    if (INSTALL_STRIPPED_BINARIES)
-        clickhouse_strip_binary(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-keeper)
+    if (SPLIT_DEBUG_SYMBOLS)
+        clickhouse_split_debug_symbols(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-keeper)
    else()
-        clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT})
+        clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-keeper DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
        install(TARGETS clickhouse-keeper RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
    endif()
endif()
@ -1,4 +1,4 @@
|
|||||||
include(${ClickHouse_SOURCE_DIR}/cmake/strip_binary.cmake)
|
include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)
|
||||||
|
|
||||||
set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
|
set (CLICKHOUSE_LIBRARY_BRIDGE_SOURCES
|
||||||
library-bridge.cpp
|
library-bridge.cpp
|
||||||
@ -24,9 +24,9 @@ target_link_libraries(clickhouse-library-bridge PRIVATE

set_target_properties(clickhouse-library-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)

-if (INSTALL_STRIPPED_BINARIES)
-    clickhouse_strip_binary(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-library-bridge)
+if (SPLIT_DEBUG_SYMBOLS)
+    clickhouse_split_debug_symbols(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-library-bridge)
else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT})
+    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-library-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
    install(TARGETS clickhouse-library-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
@ -1,4 +1,4 @@
-include(${ClickHouse_SOURCE_DIR}/cmake/strip_binary.cmake)
+include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)

set (CLICKHOUSE_ODBC_BRIDGE_SOURCES
    ColumnInfoHandler.cpp
@ -39,10 +39,10 @@ if (USE_GDB_ADD_INDEX)
    add_custom_command(TARGET clickhouse-odbc-bridge POST_BUILD COMMAND ${GDB_ADD_INDEX_EXE} ../clickhouse-odbc-bridge COMMENT "Adding .gdb-index to clickhouse-odbc-bridge" VERBATIM)
endif()

-if (INSTALL_STRIPPED_BINARIES)
-    clickhouse_strip_binary(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT} BINARY_PATH ../clickhouse-odbc-bridge)
+if (SPLIT_DEBUG_SYMBOLS)
+    clickhouse_split_debug_symbols(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH ../clickhouse-odbc-bridge)
else()
-    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${STRIPPED_BINARIES_OUTPUT})
+    clickhouse_make_empty_debug_info_for_nfpm(TARGET clickhouse-odbc-bridge DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/../${SPLITTED_DEBUG_SYMBOLS_DIR})
    install(TARGETS clickhouse-odbc-bridge RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
endif()
programs/self-extracting/CMakeLists.txt (new file)
@ -0,0 +1,6 @@
+add_custom_target (self-extracting ALL
+    ${CMAKE_COMMAND} -E remove clickhouse
+    COMMAND ${CMAKE_BINARY_DIR}/utils/self-extracting-executable/compressor clickhouse ../clickhouse
+    DEPENDS clickhouse compressor
+)
@ -10,6 +10,7 @@
#include <Backups/BackupEntriesCollector.h>
#include <Backups/BackupEntryFromMemory.h>
#include <Backups/IBackup.h>
+#include <Backups/RestoreSettings.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromString.h>
@ -25,6 +26,7 @@ namespace DB

namespace ErrorCodes
{
+    extern const int CANNOT_RESTORE_TABLE;
    extern const int LOGICAL_ERROR;
}
@ -139,7 +141,7 @@ namespace
        }
        catch (Exception & e)
        {
-            e.addMessage("While parsing " + file_path);
+            e.addMessage("While parsing " + file_path + " from backup");
            throw;
        }
    }
@ -225,7 +227,7 @@ namespace
        }
    }

-    AccessRightsElements getRequiredAccessToRestore(const std::unordered_map<UUID, AccessEntityPtr> & entities)
+    AccessRightsElements getRequiredAccessToRestore(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities)
    {
        AccessRightsElements res;
        for (const auto & entity : entities | boost::adaptors::map_values)
@ -294,65 +296,78 @@ namespace
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
-void backupAccessEntities(
-    BackupEntriesCollector & backup_entries_collector,
-    const String & data_path_in_backup,
-    const AccessControl & access_control,
-    AccessEntityType type)
+std::pair<String, BackupEntryPtr> makeBackupEntryForAccess(
+    const std::vector<std::pair<UUID, AccessEntityPtr>> access_entities,
+    const String & data_path_in_backup,
+    size_t counter,
+    const AccessControl & access_control)
 {
-    auto entities = access_control.readAllForBackup(type, backup_entries_collector.getBackupSettings());
-    auto dependencies = readDependenciesNamesAndTypes(findDependencies(entities), access_control);
+    auto dependencies = readDependenciesNamesAndTypes(findDependencies(access_entities), access_control);
     AccessEntitiesInBackup ab;
-    boost::range::copy(entities, std::inserter(ab.entities, ab.entities.end()));
+    boost::range::copy(access_entities, std::inserter(ab.entities, ab.entities.end()));
     ab.dependencies = std::move(dependencies);
-    backup_entries_collector.addBackupEntry(fs::path{data_path_in_backup} / "access.txt", ab.toBackupEntry());
+    String filename = fmt::format("access{:02}.txt", counter + 1); /// access01.txt, access02.txt, ...
+    String file_path_in_backup = fs::path{data_path_in_backup} / filename;
+    return {file_path_in_backup, ab.toBackupEntry()};
 }


-AccessRestoreTask::AccessRestoreTask(
-    const BackupPtr & backup_, const RestoreSettings & restore_settings_, std::shared_ptr<IRestoreCoordination> restore_coordination_)
-    : backup(backup_), restore_settings(restore_settings_), restore_coordination(restore_coordination_)
+AccessRestorerFromBackup::AccessRestorerFromBackup(
+    const BackupPtr & backup_, const RestoreSettings & restore_settings_)
+    : backup(backup_), allow_unresolved_access_dependencies(restore_settings_.allow_unresolved_access_dependencies)
 {
 }

-AccessRestoreTask::~AccessRestoreTask() = default;
+AccessRestorerFromBackup::~AccessRestorerFromBackup() = default;

-void AccessRestoreTask::addDataPath(const String & data_path)
+void AccessRestorerFromBackup::addDataPath(const String & data_path, const QualifiedTableName & table_name_for_logs)
 {
     if (!data_paths.emplace(data_path).second)
         return;

-    String file_path = fs::path{data_path} / "access.txt";
-    auto backup_entry = backup->readFile(file_path);
-    auto ab = AccessEntitiesInBackup::fromBackupEntry(*backup_entry, file_path);
+    fs::path data_path_in_backup_fs = data_path;
+    Strings filenames = backup->listFiles(data_path);
+    if (filenames.empty())
+        return;
+
+    for (const String & filename : filenames)
+    {
+        if (!filename.starts_with("access") || !filename.ends_with(".txt"))
+            throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore table {}: File name {} doesn't match the wildcard \"access*.txt\"",
+                            table_name_for_logs.getFullName(), String{data_path_in_backup_fs / filename});
+    }
+
+    ::sort(filenames.begin(), filenames.end());
+
+    for (const String & filename : filenames)
+    {
+        String filepath_in_backup = data_path_in_backup_fs / filename;
+        auto backup_entry = backup->readFile(filepath_in_backup);
+        auto ab = AccessEntitiesInBackup::fromBackupEntry(*backup_entry, filepath_in_backup);

-    boost::range::copy(ab.entities, std::inserter(entities, entities.end()));
-    boost::range::copy(ab.dependencies, std::inserter(dependencies, dependencies.end()));
+        boost::range::copy(ab.entities, std::back_inserter(entities));
+        boost::range::copy(ab.dependencies, std::inserter(dependencies, dependencies.end()));
+    }

     for (const auto & id : entities | boost::adaptors::map_keys)
         dependencies.erase(id);
 }

-bool AccessRestoreTask::hasDataPath(const String & data_path) const
-{
-    return data_paths.contains(data_path);
-}
-
-AccessRightsElements AccessRestoreTask::getRequiredAccess() const
+AccessRightsElements AccessRestorerFromBackup::getRequiredAccess() const
 {
     return getRequiredAccessToRestore(entities);
 }

-void AccessRestoreTask::restore(AccessControl & access_control) const
+std::vector<std::pair<UUID, AccessEntityPtr>> AccessRestorerFromBackup::getAccessEntities(const AccessControl & access_control) const
 {
-    auto old_to_new_ids = resolveDependencies(dependencies, access_control, restore_settings.allow_unresolved_access_dependencies);
+    auto new_entities = entities;

-    std::vector<std::pair<UUID, AccessEntityPtr>> new_entities;
-    boost::range::copy(entities, std::back_inserter(new_entities));
+    auto old_to_new_ids = resolveDependencies(dependencies, access_control, allow_unresolved_access_dependencies);
     generateRandomIDs(new_entities, old_to_new_ids);

     replaceDependencies(new_entities, old_to_new_ids);

-    access_control.insertFromBackup(new_entities, restore_settings, restore_coordination);
+    return new_entities;
 }

 }
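A minimal standalone sketch of the file-naming rule introduced above (the counter values are assumed; in the real code the counter comes from BackupEntriesCollector::getAccessCounter(), and the {fmt} library is required):

    #include <fmt/format.h>
    #include <iostream>

    int main()
    {
        /// One numbered entry per access storage sharing the same data path.
        for (size_t counter = 0; counter < 3; ++counter)
            std::cout << fmt::format("access{:02}.txt", counter + 1) << '\n';
        /// Prints: access01.txt, access02.txt, access03.txt
    }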
@@ -1,6 +1,6 @@
 #pragma once

-#include <Backups/RestoreSettings.h>
+#include <Core/UUID.h>
 #include <unordered_map>
 #include <unordered_set>

@@ -9,46 +9,45 @@ namespace DB
 {
 class AccessControl;
 enum class AccessEntityType;
-class BackupEntriesCollector;
-class RestorerFromBackup;
-class IBackup;
-using BackupPtr = std::shared_ptr<const IBackup>;
-class IRestoreCoordination;
 struct IAccessEntity;
 using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;
 class AccessRightsElements;
+class IBackup;
+using BackupPtr = std::shared_ptr<const IBackup>;
+class IBackupEntry;
+using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
+struct RestoreSettings;
+struct QualifiedTableName;


 /// Makes a backup of access entities of a specified type.
-void backupAccessEntities(
-    BackupEntriesCollector & backup_entries_collector,
-    const String & data_path_in_backup,
-    const AccessControl & access_control,
-    AccessEntityType type);
+std::pair<String, BackupEntryPtr> makeBackupEntryForAccess(
+    const std::vector<std::pair<UUID, AccessEntityPtr>> access_entities,
+    const String & data_path_in_backup,
+    size_t counter,
+    const AccessControl & access_control);


 /// Restores access entities from a backup.
-class AccessRestoreTask
+class AccessRestorerFromBackup
 {
 public:
-    AccessRestoreTask(
-        const BackupPtr & backup_, const RestoreSettings & restore_settings_, std::shared_ptr<IRestoreCoordination> restore_coordination_);
-    ~AccessRestoreTask();
+    AccessRestorerFromBackup(const BackupPtr & backup_, const RestoreSettings & restore_settings_);
+    ~AccessRestorerFromBackup();

     /// Adds a data path to load access entities from.
-    void addDataPath(const String & data_path);
-    bool hasDataPath(const String & data_path) const;
+    void addDataPath(const String & data_path, const QualifiedTableName & table_name_for_logs);

     /// Checks that the current user can do restoring.
     AccessRightsElements getRequiredAccess() const;

     /// Inserts all access entities loaded from all the paths added by addDataPath().
-    void restore(AccessControl & access_control) const;
+    std::vector<std::pair<UUID, AccessEntityPtr>> getAccessEntities(const AccessControl & access_control) const;

 private:
     BackupPtr backup;
-    RestoreSettings restore_settings;
-    std::shared_ptr<IRestoreCoordination> restore_coordination;
-    std::unordered_map<UUID, AccessEntityPtr> entities;
+    bool allow_unresolved_access_dependencies = false;
+    std::vector<std::pair<UUID, AccessEntityPtr>> entities;
     std::unordered_map<UUID, std::pair<String, AccessEntityType>> dependencies;
     std::unordered_set<String> data_paths;
 };
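For orientation, a standalone model of what getAccessEntities() declared above does conceptually: restored entities receive fresh ids and their cross-references are rewritten through an old-to-new map (toy int ids and a fixed map stand in for UUIDs, resolveDependencies() and generateRandomIDs()):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        std::map<int, int> old_to_new_ids = {{1, 101}, {2, 102}};
        std::vector<std::pair<int, std::string>> entities = {{1, "role_a"}, {2, "user_b"}};
        for (auto & [id, name] : entities)
            id = old_to_new_ids.at(id);   /// generateRandomIDs()/replaceDependencies() analogue
        for (const auto & [id, name] : entities)
            std::cout << id << ' ' << name << '\n';   /// 101 role_a, 102 user_b
    }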
@@ -459,20 +459,9 @@ UUID AccessControl::authenticate(const Credentials & credentials, const Poco::Ne
     }
 }

-void AccessControl::backup(BackupEntriesCollector & backup_entries_collector, AccessEntityType type, const String & data_path_in_backup) const
+void AccessControl::restoreFromBackup(RestorerFromBackup & restorer)
 {
-    backupAccessEntities(backup_entries_collector, data_path_in_backup, *this, type);
-}
-
-void AccessControl::restore(RestorerFromBackup & restorer, const String & data_path_in_backup)
-{
-    /// The restorer must already know about `data_path_in_backup`, but let's check.
-    restorer.checkPathInBackupToRestoreAccess(data_path_in_backup);
-}
-
-void AccessControl::insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination)
-{
-    MultipleAccessStorage::insertFromBackup(entities_from_backup, restore_settings, restore_coordination);
+    MultipleAccessStorage::restoreFromBackup(restorer);
     changes_notifier->sendNotifications();
 }

@@ -42,8 +42,6 @@ class ClientInfo;
 class ExternalAuthenticators;
 class AccessChangesNotifier;
 struct Settings;
-class BackupEntriesCollector;
-class RestorerFromBackup;


 /// Manages access control entities.
@@ -121,8 +119,7 @@ public:
     UUID authenticate(const Credentials & credentials, const Poco::Net::IPAddress & address) const;

     /// Makes a backup of access entities.
-    void backup(BackupEntriesCollector & backup_entries_collector, AccessEntityType type, const String & data_path_in_backup) const;
-    static void restore(RestorerFromBackup & restorer, const String & data_path_in_backup);
+    void restoreFromBackup(RestorerFromBackup & restorer) override;

     void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);

@@ -198,8 +195,6 @@ public:
     /// Gets manager of notifications.
     AccessChangesNotifier & getChangesNotifier();

-    void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination) override;
-
 private:
     class ContextAccessCache;
     class CustomSettingsPrefixes;
@@ -1,6 +1,7 @@
 #include <Access/DiskAccessStorage.h>
 #include <Access/AccessEntityIO.h>
 #include <Access/AccessChangesNotifier.h>
+#include <Backups/RestorerFromBackup.h>
 #include <Backups/RestoreSettings.h>
 #include <IO/WriteHelpers.h>
 #include <IO/ReadHelpers.h>
@@ -650,19 +651,24 @@ void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
 }


-void DiskAccessStorage::insertFromBackup(
-    const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup,
-    const RestoreSettings & restore_settings,
-    std::shared_ptr<IRestoreCoordination>)
+void DiskAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
 {
     if (!isRestoreAllowed())
         throwRestoreNotAllowed();

-    bool replace_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kReplace);
-    bool throw_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kCreate);
+    auto entities = restorer.getAccessEntitiesToRestore();
+    if (entities.empty())
+        return;

-    for (const auto & [id, entity] : entities_from_backup)
-        insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    auto create_access = restorer.getRestoreSettings().create_access;
+    bool replace_if_exists = (create_access == RestoreAccessCreationMode::kReplace);
+    bool throw_if_exists = (create_access == RestoreAccessCreationMode::kCreate);
+
+    restorer.addDataRestoreTask([this, entities = std::move(entities), replace_if_exists, throw_if_exists]
+    {
+        for (const auto & [id, entity] : entities)
+            insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    });
 }

 }
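The hunk above switches from inserting entities immediately to queueing the work through addDataRestoreTask(). A standalone model of that deferral pattern, with simplified stand-in types:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main()
    {
        std::vector<std::function<void()>> data_restore_tasks;
        std::vector<std::pair<int, std::string>> entities = {{1, "user_a"}};
        bool replace_if_exists = true;

        /// The storage captures its inputs now...
        data_restore_tasks.push_back([entities = std::move(entities), replace_if_exists]
        {
            for (const auto & [id, name] : entities)
                std::cout << "insert " << id << ' ' << name << " replace=" << replace_if_exists << '\n';
        });

        /// ...but insertion runs later, when the restorer executes its queued tasks.
        for (auto & task : data_restore_tasks)
            task();
    }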
@@ -30,7 +30,7 @@
     bool exists(const UUID & id) const override;

     bool isBackupAllowed() const override { return backup_allowed; }
-    void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination) override;
+    void restoreFromBackup(RestorerFromBackup & restorer) override;

 private:
     std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
@@ -2,9 +2,12 @@
 #include <Access/Authentication.h>
 #include <Access/Credentials.h>
 #include <Access/User.h>
+#include <Access/AccessBackup.h>
+#include <Backups/BackupEntriesCollector.h>
 #include <Common/Exception.h>
 #include <Common/quoteString.h>
 #include <IO/WriteHelpers.h>
+#include <Interpreters/Context.h>
 #include <Poco/UUIDGenerator.h>
 #include <Poco/Logger.h>
 #include <base/FnTraits.h>
@@ -520,26 +523,30 @@ bool IAccessStorage::isAddressAllowed(const User & user, const Poco::Net::IPAddr
 }


-bool IAccessStorage::isRestoreAllowed() const
-{
-    return isBackupAllowed() && !isReadOnly();
-}
-
-std::vector<std::pair<UUID, AccessEntityPtr>> IAccessStorage::readAllForBackup(AccessEntityType type, const BackupSettings &) const
+void IAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const
 {
     if (!isBackupAllowed())
         throwBackupNotAllowed();

-    auto res = readAllWithIDs(type);
-    boost::range::remove_erase_if(res, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
-    return res;
+    auto entities = readAllWithIDs(type);
+    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
+
+    auto backup_entry = makeBackupEntryForAccess(
+        entities,
+        data_path_in_backup,
+        backup_entries_collector.getAccessCounter(type),
+        backup_entries_collector.getContext()->getAccessControl());
+
+    backup_entries_collector.addBackupEntry(backup_entry);
 }

-void IAccessStorage::insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> &, const RestoreSettings &, std::shared_ptr<IRestoreCoordination>)
+void IAccessStorage::restoreFromBackup(RestorerFromBackup &)
 {
     if (!isRestoreAllowed())
         throwRestoreNotAllowed();
-    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "insertFromBackup() is not implemented in {}", getStorageType());
+
+    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "restoreFromBackup() is not implemented in {}", getStorageType());
 }

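The filtering step above drops entities that forbid backup before the entry is built. A standalone model using the plain erase-remove idiom in place of boost::range::remove_erase_if():

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Entity { int id; bool backup_allowed; };

    int main()
    {
        std::vector<Entity> entities = {{1, true}, {2, false}, {3, true}};
        entities.erase(std::remove_if(entities.begin(), entities.end(),
                                      [](const Entity & e) { return !e.backup_allowed; }),
                       entities.end());
        for (const auto & e : entities)
            std::cout << e.id << '\n';   /// 1, 3
    }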
@@ -18,9 +18,8 @@ struct User;
 class Credentials;
 class ExternalAuthenticators;
 enum class AuthenticationType;
-struct BackupSettings;
-struct RestoreSettings;
-class IRestoreCoordination;
+class BackupEntriesCollector;
+class RestorerFromBackup;

 /// Contains entities, i.e. instances of classes derived from IAccessEntity.
 /// The implementations of this class MUST be thread-safe.
@@ -158,11 +157,11 @@ public:

     /// Returns true if this storage can be stored to or restored from a backup.
     virtual bool isBackupAllowed() const { return false; }
-    virtual bool isRestoreAllowed() const;
+    virtual bool isRestoreAllowed() const { return isBackupAllowed() && !isReadOnly(); }

     /// Makes a backup of this access storage.
-    virtual std::vector<std::pair<UUID, AccessEntityPtr>> readAllForBackup(AccessEntityType type, const BackupSettings & backup_settings) const;
-    virtual void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination);
+    virtual void backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const;
+    virtual void restoreFromBackup(RestorerFromBackup & restorer);

 protected:
     virtual std::optional<UUID> findImpl(AccessEntityType type, const String & name) const = 0;
@@ -1,5 +1,6 @@
 #include <Access/MemoryAccessStorage.h>
 #include <Access/AccessChangesNotifier.h>
+#include <Backups/RestorerFromBackup.h>
 #include <Backups/RestoreSettings.h>
 #include <base/scope_guard.h>
 #include <boost/container/flat_set.hpp>
@@ -272,19 +273,24 @@ void MemoryAccessStorage::setAll(const std::vector<std::pair<UUID, AccessEntityP
 }


-void MemoryAccessStorage::insertFromBackup(
-    const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup,
-    const RestoreSettings & restore_settings,
-    std::shared_ptr<IRestoreCoordination>)
+void MemoryAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
 {
     if (!isRestoreAllowed())
         throwRestoreNotAllowed();

-    bool replace_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kReplace);
-    bool throw_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kCreate);
+    auto entities = restorer.getAccessEntitiesToRestore();
+    if (entities.empty())
+        return;

-    for (const auto & [id, entity] : entities_from_backup)
-        insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    auto create_access = restorer.getRestoreSettings().create_access;
+    bool replace_if_exists = (create_access == RestoreAccessCreationMode::kReplace);
+    bool throw_if_exists = (create_access == RestoreAccessCreationMode::kCreate);
+
+    restorer.addDataRestoreTask([this, entities = std::move(entities), replace_if_exists, throw_if_exists]
+    {
+        for (const auto & [id, entity] : entities)
+            insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    });
 }

 }
@@ -29,7 +29,7 @@
     bool exists(const UUID & id) const override;

     bool isBackupAllowed() const override { return backup_allowed; }
-    void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination) override;
+    void restoreFromBackup(RestorerFromBackup & restorer) override;

 private:
     std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
@@ -383,40 +383,38 @@ bool MultipleAccessStorage::isRestoreAllowed() const
 }


-std::vector<std::pair<UUID, AccessEntityPtr>> MultipleAccessStorage::readAllForBackup(AccessEntityType type, const BackupSettings & backup_settings) const
+void MultipleAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const
 {
-    std::vector<std::pair<UUID, AccessEntityPtr>> res;
     auto storages = getStoragesInternal();
-    size_t count = 0;
+    bool allowed = false;

     for (const auto & storage : *storages)
     {
         if (storage->isBackupAllowed())
         {
-            insertAtEnd(res, storage->readAllForBackup(type, backup_settings));
-            ++count;
+            storage->backup(backup_entries_collector, data_path_in_backup, type);
+            allowed = true;
         }
     }

-    if (!count)
+    if (!allowed)
         throwBackupNotAllowed();
-
-    return res;
 }

-void MultipleAccessStorage::insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination)
+void MultipleAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
 {
     auto storages = getStoragesInternal();

     for (const auto & storage : *storages)
     {
         if (storage->isRestoreAllowed())
         {
-            storage->insertFromBackup(entities_from_backup, restore_settings, restore_coordination);
+            storage->restoreFromBackup(restorer);
             return;
         }
     }
-    throwRestoreNotAllowed();
+
+    throwBackupNotAllowed();
 }

 }
@@ -45,8 +45,8 @@ public:

     bool isBackupAllowed() const override;
     bool isRestoreAllowed() const override;
-    std::vector<std::pair<UUID, AccessEntityPtr>> readAllForBackup(AccessEntityType type, const BackupSettings & backup_settings) const override;
-    void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination) override;
+    void backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const override;
+    void restoreFromBackup(RestorerFromBackup & restorer) override;

 protected:
     std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
@@ -2,10 +2,14 @@
 #include <Access/MemoryAccessStorage.h>
 #include <Access/ReplicatedAccessStorage.h>
 #include <Access/AccessChangesNotifier.h>
+#include <Access/AccessBackup.h>
+#include <Backups/BackupEntriesCollector.h>
+#include <Backups/RestorerFromBackup.h>
 #include <Backups/RestoreSettings.h>
+#include <Backups/IBackupCoordination.h>
 #include <Backups/IRestoreCoordination.h>
 #include <IO/ReadHelpers.h>
-#include <boost/container/flat_set.hpp>
+#include <Interpreters/Context.h>
 #include <Common/ZooKeeper/KeeperException.h>
 #include <Common/ZooKeeper/Types.h>
 #include <Common/ZooKeeper/ZooKeeper.h>
@@ -13,6 +17,7 @@
 #include <Common/setThreadName.h>
 #include <base/range.h>
 #include <base/sleep.h>
+#include <boost/range/algorithm_ext/erase.hpp>


 namespace DB
@@ -613,19 +618,64 @@ AccessEntityPtr ReplicatedAccessStorage::readImpl(const UUID & id, bool throw_if
     return entry.entity;
 }

-void ReplicatedAccessStorage::insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination)
+void ReplicatedAccessStorage::backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const
+{
+    if (!isBackupAllowed())
+        throwBackupNotAllowed();
+
+    auto entities = readAllWithIDs(type);
+    boost::range::remove_erase_if(entities, [](const std::pair<UUID, AccessEntityPtr> & x) { return !x.second->isBackupAllowed(); });
+
+    auto backup_entry_with_path = makeBackupEntryForAccess(
+        entities,
+        data_path_in_backup,
+        backup_entries_collector.getAccessCounter(type),
+        backup_entries_collector.getContext()->getAccessControl());
+
+    auto backup_coordination = backup_entries_collector.getBackupCoordination();
+    backup_coordination->addReplicatedAccessPath(zookeeper_path, backup_entry_with_path.first);
+    String current_host_id = backup_entries_collector.getBackupSettings().host_id;
+    backup_coordination->setReplicatedAccessHost(zookeeper_path, current_host_id);
+
+    backup_entries_collector.addPostTask(
+        [backup_entry = backup_entry_with_path.second,
+         zookeeper_path = zookeeper_path,
+         current_host_id,
+         &backup_entries_collector,
+         backup_coordination]
+        {
+            if (current_host_id != backup_coordination->getReplicatedAccessHost(zookeeper_path))
+                return;
+
+            for (const String & path : backup_coordination->getReplicatedAccessPaths(zookeeper_path))
+                backup_entries_collector.addBackupEntry(path, backup_entry);
+        });
+}
+
+
+void ReplicatedAccessStorage::restoreFromBackup(RestorerFromBackup & restorer)
 {
     if (!isRestoreAllowed())
         throwRestoreNotAllowed();

+    auto restore_coordination = restorer.getRestoreCoordination();
     if (!restore_coordination->acquireReplicatedAccessStorage(zookeeper_path))
         return;

-    bool replace_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kReplace);
-    bool throw_if_exists = (restore_settings.create_access == RestoreAccessCreationMode::kCreate);
+    auto entities = restorer.getAccessEntitiesToRestore();
+    if (entities.empty())
+        return;

-    for (const auto & [id, entity] : entities_from_backup)
-        insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    auto create_access = restorer.getRestoreSettings().create_access;
+    bool replace_if_exists = (create_access == RestoreAccessCreationMode::kReplace);
+    bool throw_if_exists = (create_access == RestoreAccessCreationMode::kCreate);
+
+    restorer.addDataRestoreTask([this, entities = std::move(entities), replace_if_exists, throw_if_exists]
+    {
+        for (const auto & [id, entity] : entities)
+            insertWithID(id, entity, replace_if_exists, throw_if_exists);
+    });
 }

 }
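A standalone model of the coordination logic in ReplicatedAccessStorage::backup() above: every replica registers its file path and proposes itself as the backup host; only the replica whose id is still recorded when the post task runs writes the shared entries (host and path names here are assumptions):

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    int main()
    {
        std::map<std::string, std::string> repl_access_host;               /// "/repl_access_host" analogue
        std::map<std::string, std::set<std::string>> repl_access_paths;    /// "/repl_access_paths" analogue
        const std::string zk_path = "/clickhouse/access";

        for (const std::string host : {"host1", "host2"})
        {
            repl_access_paths[zk_path].insert("shard1/" + host + "/access01.txt");
            repl_access_host[zk_path] = host;   /// setReplicatedAccessHost(): last writer wins
        }

        const std::string current_host = "host2";
        if (repl_access_host[zk_path] == current_host)   /// post-task check
            for (const auto & path : repl_access_paths[zk_path])
                std::cout << current_host << " writes " << path << '\n';
    }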
@@ -38,7 +38,8 @@ public:
     bool exists(const UUID & id) const override;

     bool isBackupAllowed() const override { return backup_allowed; }
-    void insertFromBackup(const std::vector<std::pair<UUID, AccessEntityPtr>> & entities_from_backup, const RestoreSettings & restore_settings, std::shared_ptr<IRestoreCoordination> restore_coordination) override;
+    void backup(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, AccessEntityType type) const override;
+    void restoreFromBackup(RestorerFromBackup & restorer) override;

 private:
     String zookeeper_path;
@@ -3,34 +3,6 @@
 #include <DataTypes/IDataType.h>
 #include <AggregateFunctions/IAggregateFunction.h>

-#define FOR_BASIC_NUMERIC_TYPES(M) \
-    M(UInt8) \
-    M(UInt16) \
-    M(UInt32) \
-    M(UInt64) \
-    M(Int8) \
-    M(Int16) \
-    M(Int32) \
-    M(Int64) \
-    M(Float32) \
-    M(Float64)
-
-#define FOR_NUMERIC_TYPES(M) \
-    M(UInt8) \
-    M(UInt16) \
-    M(UInt32) \
-    M(UInt64) \
-    M(UInt128) \
-    M(UInt256) \
-    M(Int8) \
-    M(Int16) \
-    M(Int32) \
-    M(Int64) \
-    M(Int128) \
-    M(Int256) \
-    M(Float32) \
-    M(Float64)
-
 namespace DB
 {
 struct Settings;
@@ -131,7 +131,7 @@ namespace
 BackupCoordinationDistributed::BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
     : zookeeper_path(zookeeper_path_)
     , get_zookeeper(get_zookeeper_)
-    , stage_sync(zookeeper_path_ + "/stage", get_zookeeper_, &Poco::Logger::get("BackupCoordination"))
+    , status_sync(zookeeper_path_ + "/status", get_zookeeper_, &Poco::Logger::get("BackupCoordination"))
 {
     createRootNodes();
 }
@@ -145,6 +145,8 @@ void BackupCoordinationDistributed::createRootNodes()
     zookeeper->createIfNotExists(zookeeper_path, "");
     zookeeper->createIfNotExists(zookeeper_path + "/repl_part_names", "");
     zookeeper->createIfNotExists(zookeeper_path + "/repl_data_paths", "");
+    zookeeper->createIfNotExists(zookeeper_path + "/repl_access_host", "");
+    zookeeper->createIfNotExists(zookeeper_path + "/repl_access_paths", "");
     zookeeper->createIfNotExists(zookeeper_path + "/file_names", "");
     zookeeper->createIfNotExists(zookeeper_path + "/file_infos", "");
     zookeeper->createIfNotExists(zookeeper_path + "/archive_suffixes", "");
@@ -157,19 +159,24 @@ void BackupCoordinationDistributed::removeAllNodes()
 }


-void BackupCoordinationDistributed::syncStage(const String & current_host, int new_stage, const Strings & wait_hosts, std::chrono::seconds timeout)
+void BackupCoordinationDistributed::setStatus(const String & current_host, const String & new_status, const String & message)
 {
-    stage_sync.syncStage(current_host, new_stage, wait_hosts, timeout);
+    status_sync.set(current_host, new_status, message);
 }

-void BackupCoordinationDistributed::syncStageError(const String & current_host, const String & error_message)
+Strings BackupCoordinationDistributed::setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts)
 {
-    stage_sync.syncStageError(current_host, error_message);
+    return status_sync.setAndWait(current_host, new_status, message, all_hosts);
+}
+
+Strings BackupCoordinationDistributed::setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms)
+{
+    return status_sync.setAndWaitFor(current_host, new_status, message, all_hosts, timeout_ms);
 }


 void BackupCoordinationDistributed::addReplicatedPartNames(
-    const String & table_zk_path,
+    const String & table_shared_id,
     const String & table_name_for_logs,
     const String & replica_name,
     const std::vector<PartNameAndChecksum> & part_names_and_checksums)
@@ -181,39 +188,39 @@ void BackupCoordinationDistributed::addReplicatedPartNames(
     }

     auto zookeeper = get_zookeeper();
-    String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_zk_path);
+    String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_shared_id);
     zookeeper->createIfNotExists(path, "");
     path += "/" + escapeForFileName(replica_name);
     zookeeper->create(path, ReplicatedPartNames::serialize(part_names_and_checksums, table_name_for_logs), zkutil::CreateMode::Persistent);
 }

-Strings BackupCoordinationDistributed::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const
+Strings BackupCoordinationDistributed::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
 {
     std::lock_guard lock{mutex};
     prepareReplicatedPartNames();
-    return replicated_part_names->getPartNames(table_zk_path, replica_name);
+    return replicated_part_names->getPartNames(table_shared_id, replica_name);
 }


 void BackupCoordinationDistributed::addReplicatedDataPath(
-    const String & table_zk_path, const String & data_path)
+    const String & table_shared_id, const String & data_path)
 {
     auto zookeeper = get_zookeeper();
-    String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_zk_path);
+    String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_shared_id);
+    zookeeper->createIfNotExists(path, "");
+    path += "/" + escapeForFileName(data_path);
     zookeeper->createIfNotExists(path, "");
-    path += "/";
-    zookeeper->create(path, data_path, zkutil::CreateMode::PersistentSequential);
 }

-Strings BackupCoordinationDistributed::getReplicatedDataPaths(const String & table_zk_path) const
+Strings BackupCoordinationDistributed::getReplicatedDataPaths(const String & table_shared_id) const
 {
     auto zookeeper = get_zookeeper();
-    String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_zk_path);
+    String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_shared_id);
     Strings children = zookeeper->getChildren(path);
     Strings data_paths;
     data_paths.reserve(children.size());
     for (const String & child : children)
-        data_paths.push_back(zookeeper->get(path + "/" + child));
+        data_paths.push_back(unescapeForFileName(child));
     return data_paths;
 }

@@ -240,6 +247,47 @@ void BackupCoordinationDistributed::prepareReplicatedPartNames() const
 }


+void BackupCoordinationDistributed::addReplicatedAccessPath(const String & access_zk_path, const String & file_path)
+{
+    auto zookeeper = get_zookeeper();
+    String path = zookeeper_path + "/repl_access_paths/" + escapeForFileName(access_zk_path);
+    zookeeper->createIfNotExists(path, "");
+    path += "/" + escapeForFileName(file_path);
+    zookeeper->createIfNotExists(path, "");
+}
+
+Strings BackupCoordinationDistributed::getReplicatedAccessPaths(const String & access_zk_path) const
+{
+    auto zookeeper = get_zookeeper();
+    String path = zookeeper_path + "/repl_access_paths/" + escapeForFileName(access_zk_path);
+    Strings children = zookeeper->getChildren(path);
+    Strings file_paths;
+    file_paths.reserve(children.size());
+    for (const String & child : children)
+        file_paths.push_back(unescapeForFileName(child));
+    return file_paths;
+}
+
+void BackupCoordinationDistributed::setReplicatedAccessHost(const String & access_zk_path, const String & host_id)
+{
+    auto zookeeper = get_zookeeper();
+    String path = zookeeper_path + "/repl_access_host/" + escapeForFileName(access_zk_path);
+    auto code = zookeeper->tryCreate(path, host_id, zkutil::CreateMode::Persistent);
+    if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
+        throw zkutil::KeeperException(code, path);
+
+    if (code == Coordination::Error::ZNODEEXISTS)
+        zookeeper->set(path, host_id);
+}
+
+String BackupCoordinationDistributed::getReplicatedAccessHost(const String & access_zk_path) const
+{
+    auto zookeeper = get_zookeeper();
+    String path = zookeeper_path + "/repl_access_host/" + escapeForFileName(access_zk_path);
+    return zookeeper->get(path);
+}
+
+
 void BackupCoordinationDistributed::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
 {
     auto zookeeper = get_zookeeper();
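A standalone model of the create-or-overwrite pattern used by setReplicatedAccessHost() above: try to create the node, and if it already exists, overwrite its value (a std::map stands in for the ZooKeeper namespace):

    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, std::string> zk;

        auto set_host = [&](const std::string & path, const std::string & host_id)
        {
            auto [it, created] = zk.emplace(path, host_id);   /// tryCreate() analogue
            if (!created)
                it->second = host_id;                         /// ZNODEEXISTS -> set()
        };

        set_host("/backup/repl_access_host/x", "host1");
        set_host("/backup/repl_access_host/x", "host2");
        std::cout << zk["/backup/repl_access_host/x"] << '\n';   /// host2
    }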
@@ -14,19 +14,26 @@ public:
     BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_);
     ~BackupCoordinationDistributed() override;

-    void syncStage(const String & current_host, int new_stage, const Strings & wait_hosts, std::chrono::seconds timeout) override;
-    void syncStageError(const String & current_host, const String & error_message) override;
+    void setStatus(const String & current_host, const String & new_status, const String & message) override;
+    Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts) override;
+    Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms) override;

     void addReplicatedPartNames(
-        const String & table_zk_path,
+        const String & table_shared_id,
         const String & table_name_for_logs,
         const String & replica_name,
         const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;

-    Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const override;
+    Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const override;

-    void addReplicatedDataPath(const String & table_zk_path, const String & data_path) override;
-    Strings getReplicatedDataPaths(const String & table_zk_path) const override;
+    void addReplicatedDataPath(const String & table_shared_id, const String & data_path) override;
+    Strings getReplicatedDataPaths(const String & table_shared_id) const override;
+
+    void addReplicatedAccessPath(const String & access_zk_path, const String & file_path) override;
+    Strings getReplicatedAccessPaths(const String & access_zk_path) const override;
+
+    void setReplicatedAccessHost(const String & access_zk_path, const String & host_id) override;
+    String getReplicatedAccessHost(const String & access_zk_path) const override;

     void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
     void updateFileInfo(const FileInfo & file_info) override;
@@ -51,7 +58,7 @@ private:
     const String zookeeper_path;
     const zkutil::GetZooKeeper get_zookeeper;

-    BackupCoordinationStageSync stage_sync;
+    BackupCoordinationStatusSync status_sync;

     mutable std::mutex mutex;
     mutable std::optional<BackupCoordinationReplicatedPartNames> replicated_part_names;
@@ -157,7 +157,7 @@ BackupCoordinationReplicatedPartNames::BackupCoordinationReplicatedPartNames() =
 BackupCoordinationReplicatedPartNames::~BackupCoordinationReplicatedPartNames() = default;

 void BackupCoordinationReplicatedPartNames::addPartNames(
-    const String & table_zk_path,
+    const String & table_shared_id,
     const String & table_name_for_logs,
     const String & replica_name,
     const std::vector<PartNameAndChecksum> & part_names_and_checksums)
@@ -165,7 +165,7 @@ void BackupCoordinationReplicatedPartNames::addPartNames(
     if (part_names_prepared)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "addPartNames() must not be called after getPartNames()");

-    auto & table_info = table_infos[table_zk_path];
+    auto & table_info = table_infos[table_shared_id];
     if (!table_info.covered_parts_finder)
         table_info.covered_parts_finder = std::make_unique<CoveredPartsFinder>(table_name_for_logs);

@@ -207,10 +207,10 @@ void BackupCoordinationReplicatedPartNames::addPartNames(
     }
 }

-Strings BackupCoordinationReplicatedPartNames::getPartNames(const String & table_zk_path, const String & replica_name) const
+Strings BackupCoordinationReplicatedPartNames::getPartNames(const String & table_shared_id, const String & replica_name) const
 {
     preparePartNames();
-    auto it = table_infos.find(table_zk_path);
+    auto it = table_infos.find(table_shared_id);
     if (it == table_infos.end())
         return {};
     const auto & replicas_parts = it->second.replicas_parts;
@@ -243,7 +243,7 @@ void BackupCoordinationReplicatedPartNames::preparePartNames() const


 /// Helps to wait until all hosts come to a specified stage.
-BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_)
+BackupCoordinationStatusSync::BackupCoordinationStatusSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_)
     : zookeeper_path(zookeeper_path_)
     , get_zookeeper(get_zookeeper_)
     , log(log_)
@@ -251,63 +251,78 @@ BackupCoordinationStageSync::BackupCoordinationStageSync(const String & zookeepe
     createRootNodes();
 }

-void BackupCoordinationStageSync::createRootNodes()
+void BackupCoordinationStatusSync::createRootNodes()
 {
     auto zookeeper = get_zookeeper();
     zookeeper->createAncestors(zookeeper_path);
     zookeeper->createIfNotExists(zookeeper_path, "");
 }

-void BackupCoordinationStageSync::syncStage(const String & current_host, int new_stage, const Strings & wait_hosts, std::chrono::seconds timeout)
+void BackupCoordinationStatusSync::set(const String & current_host, const String & new_status, const String & message)
 {
-    /// Put new stage to ZooKeeper.
-    auto zookeeper = get_zookeeper();
-    zookeeper->createIfNotExists(zookeeper_path + "/" + current_host + "|" + std::to_string(new_stage), "");
+    setImpl(current_host, new_status, message, {}, {});
+}

-    if (wait_hosts.empty() || ((wait_hosts.size() == 1) && (wait_hosts.front() == current_host)))
-        return;
+Strings BackupCoordinationStatusSync::setAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts)
+{
+    return setImpl(current_host, new_status, message, all_hosts, {});
+}
+
+Strings BackupCoordinationStatusSync::setAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms)
+{
+    return setImpl(current_host, new_status, message, all_hosts, timeout_ms);
+}
+
+Strings BackupCoordinationStatusSync::setImpl(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, const std::optional<UInt64> & timeout_ms)
+{
+    /// Put new status to ZooKeeper.
+    auto zookeeper = get_zookeeper();
+    zookeeper->createIfNotExists(zookeeper_path + "/" + current_host + "|" + new_status, message);
+
+    if (all_hosts.empty() || (new_status == kErrorStatus))
+        return {};
+
+    if ((all_hosts.size() == 1) && (all_hosts.front() == current_host))
+        return {message};

     /// Wait for other hosts.

-    /// Current stages of all hosts.
+    Strings ready_hosts_results;
+    ready_hosts_results.resize(all_hosts.size());
+
+    std::map<String, std::vector<size_t> /* index in `ready_hosts_results` */> unready_hosts;
+    for (size_t i = 0; i != all_hosts.size(); ++i)
+        unready_hosts[all_hosts[i]].push_back(i);
+
     std::optional<String> host_with_error;
     std::optional<String> error_message;

-    std::map<String, std::optional<int>> unready_hosts;
-    for (const String & host : wait_hosts)
-        unready_hosts.emplace(host, std::optional<int>{});
-
     /// Process ZooKeeper's nodes and set `all_hosts_ready` or `unready_host` or `error_message`.
     auto process_zk_nodes = [&](const Strings & zk_nodes)
     {
         for (const String & zk_node : zk_nodes)
         {
-            if (zk_node == "error")
+            if (zk_node.starts_with("remove_watch-"))
+                continue;
+
+            size_t separator_pos = zk_node.find('|');
+            if (separator_pos == String::npos)
+                throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Unexpected zk node {}", zookeeper_path + "/" + zk_node);
+            String host = zk_node.substr(0, separator_pos);
+            String status = zk_node.substr(separator_pos + 1);
+            if (status == kErrorStatus)
             {
-                String str = zookeeper->get(zookeeper_path + "/" + zk_node);
-                size_t separator_pos = str.find('|');
-                if (separator_pos == String::npos)
-                    throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Unexpected value of zk node {}: {}", zookeeper_path + "/" + zk_node, str);
-                host_with_error = str.substr(0, separator_pos);
-                error_message = str.substr(separator_pos + 1);
+                host_with_error = host;
+                error_message = zookeeper->get(zookeeper_path + "/" + zk_node);
                 return;
             }
-            else if (!zk_node.starts_with("remove_watch-"))
+            auto it = unready_hosts.find(host);
+            if ((it != unready_hosts.end()) && (status == new_status))
             {
-                size_t separator_pos = zk_node.find('|');
-                if (separator_pos == String::npos)
-                    throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Unexpected zk node {}", zookeeper_path + "/" + zk_node);
-                String host = zk_node.substr(0, separator_pos);
-                int found_stage = parseFromString<int>(zk_node.substr(separator_pos + 1));
-                auto it = unready_hosts.find(host);
-                if (it != unready_hosts.end())
-                {
-                    auto & stage = it->second;
-                    if (!stage || (stage < found_stage))
-                        stage = found_stage;
-                    if (stage >= new_stage)
-                        unready_hosts.erase(it);
-                }
+                String result = zookeeper->get(zookeeper_path + "/" + zk_node);
+                for (size_t i : it->second)
+                    ready_hosts_results[i] = result;
+                unready_hosts.erase(it);
             }
         }
     };
@@ -324,7 +339,8 @@ void BackupCoordinationStageSync::syncStage(const String & current_host, int new

     auto watch_triggered = [&] { return !watch_set; };

-    bool use_timeout = (timeout.count() >= 0);
+    bool use_timeout = timeout_ms.has_value();
+    std::chrono::milliseconds timeout{timeout_ms.value_or(0)};
     std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
     std::chrono::steady_clock::duration elapsed;
     std::mutex dummy_mutex;
@@ -369,12 +385,8 @@ void BackupCoordinationStageSync::syncStage(const String & current_host, int new
             unready_hosts.begin()->first,
             to_string(elapsed));
     }
-}
-
-void BackupCoordinationStageSync::syncStageError(const String & current_host, const String & error_message)
-{
-    auto zookeeper = get_zookeeper();
-    zookeeper->createIfNotExists(zookeeper_path + "/error", current_host + "|" + error_message);
+
+    return ready_hosts_results;
 }

 }
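A standalone model of the znode naming that BackupCoordinationStatusSync relies on above: each node is named "<host>|<status>", service nodes prefixed "remove_watch-" are skipped, and an "error" status carries its message in the node value (the sample node names are assumptions):

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> zk_nodes = {"host1|preparing", "host2|error", "remove_watch-42"};
        for (const std::string & zk_node : zk_nodes)
        {
            if (zk_node.rfind("remove_watch-", 0) == 0)
                continue;   /// service nodes are skipped
            size_t separator_pos = zk_node.find('|');
            std::string host = zk_node.substr(0, separator_pos);
            std::string status = zk_node.substr(separator_pos + 1);
            std::cout << host << " -> " << status << '\n';
        }
    }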
@ -24,7 +24,7 @@ public:
|
|||||||
/// getPartNames().
|
/// getPartNames().
|
||||||
/// Checksums are used only to control that parts under the same names on different replicas are the same.
|
/// Checksums are used only to control that parts under the same names on different replicas are the same.
|
||||||
void addPartNames(
|
void addPartNames(
|
||||||
const String & table_zk_path,
|
const String & table_shared_id,
|
||||||
const String & table_name_for_logs,
|
const String & table_name_for_logs,
|
||||||
const String & replica_name,
|
const String & replica_name,
|
||||||
const std::vector<PartNameAndChecksum> & part_names_and_checksums);
|
const std::vector<PartNameAndChecksum> & part_names_and_checksums);
|
||||||
@ -32,7 +32,7 @@ public:
|
|||||||
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
|
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
|
||||||
/// This is the same list as it was added by call of the function addPartNames() but without duplications and without
|
/// This is the same list as it was added by call of the function addPartNames() but without duplications and without
|
||||||
/// parts covered by another parts.
|
/// parts covered by another parts.
|
||||||
Strings getPartNames(const String & table_zk_path, const String & replica_name) const;
|
Strings getPartNames(const String & table_shared_id, const String & replica_name) const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void preparePartNames() const;
|
void preparePartNames() const;
|
||||||
@ -52,22 +52,26 @@ private:
|
|||||||
std::unique_ptr<CoveredPartsFinder> covered_parts_finder;
|
std::unique_ptr<CoveredPartsFinder> covered_parts_finder;
|
||||||
};
|
};
|
||||||
|
|
||||||
std::map<String /* table_zk_path */, TableInfo> table_infos; /// Should be ordered because we need this map to be in the same order on every replica.
|
std::map<String /* table_shared_id */, TableInfo> table_infos; /// Should be ordered because we need this map to be in the same order on every replica.
|
||||||
mutable bool part_names_prepared = false;
|
mutable bool part_names_prepared = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
/// Helps to wait until all hosts come to a specified stage.
|
/// Helps to wait until all hosts come to a specified stage.
|
||||||
class BackupCoordinationStageSync
|
class BackupCoordinationStatusSync
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
BackupCoordinationStageSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_);
|
BackupCoordinationStatusSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, Poco::Logger * log_);
|
||||||
|
|
||||||
void syncStage(const String & current_host, int stage, const Strings & wait_hosts, std::chrono::seconds timeout);
|
void set(const String & current_host, const String & new_status, const String & message);
|
||||||
void syncStageError(const String & current_host, const String & error_message);
|
Strings setAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts);
|
||||||
|
Strings setAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms);
|
||||||
|
|
||||||
|
static constexpr const char * kErrorStatus = "error";
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void createRootNodes();
|
void createRootNodes();
|
||||||
|
Strings setImpl(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, const std::optional<UInt64> & timeout_ms);
|
||||||
|
|
||||||
String zookeeper_path;
|
String zookeeper_path;
|
||||||
zkutil::GetZooKeeper get_zookeeper;
|
zkutil::GetZooKeeper get_zookeeper;
|
||||||
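The renamed class swaps the numeric stage for a free-form status string and makes waiting explicit: set() only publishes, setAndWait() blocks until every listed host reports the same status, and setAndWaitFor() bounds that wait. A minimal usage sketch, assuming a ZooKeeper getter and logger are already in scope (host names and status strings below are illustrative):

    BackupCoordinationStatusSync status_sync{"/clickhouse/backups/backup-1/status", get_zookeeper, log};

    Strings all_hosts{"host-1", "host-2"};
    /// Publish our status and block until every host reaches it (or one reports kErrorStatus).
    status_sync.setAndWait("host-1", "creating tables", /* message = */ "", all_hosts);
    /// Same, but give up after a minute instead of waiting forever.
    status_sync.setAndWaitFor("host-1", "inserting data to tables", "", all_hosts, /* timeout_ms = */ 60000);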
@@ -13,43 +13,80 @@ using FileInfo = IBackupCoordination::FileInfo;
 BackupCoordinationLocal::BackupCoordinationLocal() = default;
 BackupCoordinationLocal::~BackupCoordinationLocal() = default;
 
-void BackupCoordinationLocal::syncStage(const String &, int, const Strings &, std::chrono::seconds)
+void BackupCoordinationLocal::setStatus(const String &, const String &, const String &)
 {
 }
 
-void BackupCoordinationLocal::syncStageError(const String &, const String &)
+Strings BackupCoordinationLocal::setStatusAndWait(const String &, const String &, const String &, const Strings &)
 {
+    return {};
+}
+
+Strings BackupCoordinationLocal::setStatusAndWaitFor(const String &, const String &, const String &, const Strings &, UInt64)
+{
+    return {};
 }
 
-void BackupCoordinationLocal::addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
+void BackupCoordinationLocal::addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
 {
     std::lock_guard lock{mutex};
-    replicated_part_names.addPartNames(table_zk_path, table_name_for_logs, replica_name, part_names_and_checksums);
+    replicated_part_names.addPartNames(table_shared_id, table_name_for_logs, replica_name, part_names_and_checksums);
 }
 
-Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const
+Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
 {
     std::lock_guard lock{mutex};
-    return replicated_part_names.getPartNames(table_zk_path, replica_name);
+    return replicated_part_names.getPartNames(table_shared_id, replica_name);
 }
 
 
-void BackupCoordinationLocal::addReplicatedDataPath(const String & table_zk_path, const String & data_path)
+void BackupCoordinationLocal::addReplicatedDataPath(const String & table_shared_id, const String & data_path)
 {
     std::lock_guard lock{mutex};
-    replicated_data_paths[table_zk_path].push_back(data_path);
+    replicated_data_paths[table_shared_id].push_back(data_path);
 }
 
-Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_zk_path) const
+Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_shared_id) const
 {
     std::lock_guard lock{mutex};
-    auto it = replicated_data_paths.find(table_zk_path);
+    auto it = replicated_data_paths.find(table_shared_id);
     if (it == replicated_data_paths.end())
         return {};
     return it->second;
 }
 
 
+void BackupCoordinationLocal::addReplicatedAccessPath(const String & access_zk_path, const String & file_path)
+{
+    std::lock_guard lock{mutex};
+    replicated_access_paths[access_zk_path].push_back(file_path);
+}
+
+Strings BackupCoordinationLocal::getReplicatedAccessPaths(const String & access_zk_path) const
+{
+    std::lock_guard lock{mutex};
+    auto it = replicated_access_paths.find(access_zk_path);
+    if (it == replicated_access_paths.end())
+        return {};
+    return it->second;
+}
+
+void BackupCoordinationLocal::setReplicatedAccessHost(const String & access_zk_path, const String & host_id)
+{
+    std::lock_guard lock{mutex};
+    replicated_access_hosts[access_zk_path] = host_id;
+}
+
+String BackupCoordinationLocal::getReplicatedAccessHost(const String & access_zk_path) const
+{
+    std::lock_guard lock{mutex};
+    auto it = replicated_access_hosts.find(access_zk_path);
+    if (it == replicated_access_hosts.end())
+        return {};
+    return it->second;
+}
+
+
 void BackupCoordinationLocal::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
 {
     std::lock_guard lock{mutex};
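All three status methods of BackupCoordinationLocal are deliberate no-ops: a backup taken on a single server has no other hosts to synchronize with, so publishing nothing and returning an empty host list is correct. A sketch of how a caller might choose between the two implementations; the factory shape below is an assumption for illustration, not code from this commit:

    std::shared_ptr<IBackupCoordination> coordination;
    if (!coordination_zk_path.empty())      /// ON CLUSTER: coordinate through ZooKeeper
        coordination = std::make_shared<BackupCoordinationDistributed>(coordination_zk_path, get_zookeeper);
    else                                    /// single server: nothing to wait for
        coordination = std::make_shared<BackupCoordinationLocal>();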
@@ -19,15 +19,22 @@ public:
     BackupCoordinationLocal();
     ~BackupCoordinationLocal() override;
 
-    void syncStage(const String & current_host, int stage, const Strings & wait_hosts, std::chrono::seconds timeout) override;
-    void syncStageError(const String & current_host, const String & error_message) override;
+    void setStatus(const String & current_host, const String & new_status, const String & message) override;
+    Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts) override;
+    Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms) override;
 
-    void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
+    void addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name,
                                 const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
-    Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const override;
+    Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const override;
 
-    void addReplicatedDataPath(const String & table_zk_path, const String & data_path) override;
-    Strings getReplicatedDataPaths(const String & table_zk_path) const override;
+    void addReplicatedDataPath(const String & table_shared_id, const String & data_path) override;
+    Strings getReplicatedDataPaths(const String & table_shared_id) const override;
 
+    void addReplicatedAccessPath(const String & access_zk_path, const String & file_path) override;
+    Strings getReplicatedAccessPaths(const String & access_zk_path) const override;
+
+    void setReplicatedAccessHost(const String & access_zk_path, const String & host_id) override;
+    String getReplicatedAccessHost(const String & access_zk_path) const override;
 
     void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
     void updateFileInfo(const FileInfo & file_info) override;
@@ -47,6 +54,8 @@ private:
     mutable std::mutex mutex;
     BackupCoordinationReplicatedPartNames replicated_part_names TSA_GUARDED_BY(mutex);
     std::unordered_map<String, Strings> replicated_data_paths TSA_GUARDED_BY(mutex);
+    std::unordered_map<String, Strings> replicated_access_paths TSA_GUARDED_BY(mutex);
+    std::unordered_map<String, String> replicated_access_hosts TSA_GUARDED_BY(mutex);
     std::map<String /* file_name */, SizeAndChecksum> file_names TSA_GUARDED_BY(mutex); /// Should be ordered alphabetically, see listFiles(). For empty files we assume checksum = 0.
     std::map<SizeAndChecksum, FileInfo> file_infos TSA_GUARDED_BY(mutex); /// Information about files. Without empty files.
     Strings archive_suffixes TSA_GUARDED_BY(mutex);
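Every mutable member above carries TSA_GUARDED_BY(mutex), a thread-safety-analysis annotation: with clang's -Wthread-safety enabled, reading or writing the member without holding `mutex` becomes a compile-time warning. A self-contained illustration of the mechanism; the macro definition below is the usual expansion such wrappers use and is not copied from this commit:

    #if defined(__clang__)
    #    define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__)))
    #else
    #    define TSA_GUARDED_BY(...)
    #endif

    #include <mutex>

    struct Counter
    {
        std::mutex mutex;
        int value TSA_GUARDED_BY(mutex) = 0;

        void increment()
        {
            std::lock_guard lock{mutex};
            ++value;                  /// OK: mutex is held
        }
        /// int peek() const { return value; }  /// would warn under -Wthread-safety
    };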
(File diff suppressed because it is too large.)
@@ -19,6 +19,7 @@ class IBackupCoordination;
 class IDatabase;
 using DatabasePtr = std::shared_ptr<IDatabase>;
 struct StorageID;
+enum class AccessEntityType;
 
 /// Collects backup entries for all databases and tables which should be put to a backup.
 class BackupEntriesCollector : private boost::noncopyable
@@ -27,84 +28,90 @@ public:
     BackupEntriesCollector(const ASTBackupQuery::Elements & backup_query_elements_,
                            const BackupSettings & backup_settings_,
                            std::shared_ptr<IBackupCoordination> backup_coordination_,
-                           const ContextPtr & context_,
-                           std::chrono::seconds timeout_ = std::chrono::seconds(-1) /* no timeout */);
+                           const ContextPtr & context_);
     ~BackupEntriesCollector();
 
     /// Collects backup entries and returns the result.
-    /// This function first generates a list of databases and then call IDatabase::backup() for each database from this list.
-    /// At this moment IDatabase::backup() calls IStorage::backup() and they both call addBackupEntry() to build a list of backup entries.
-    BackupEntries getBackupEntries();
+    /// This function first generates a list of databases and then call IDatabase::getTablesForBackup() for each database from this list.
+    /// Then it calls IStorage::backupData() to build a list of backup entries.
+    BackupEntries run();
 
     const BackupSettings & getBackupSettings() const { return backup_settings; }
     std::shared_ptr<IBackupCoordination> getBackupCoordination() const { return backup_coordination; }
     ContextPtr getContext() const { return context; }
 
-    /// Adds a backup entry which will be later returned by getBackupEntries().
-    /// These function can be called by implementations of IStorage::backup() in inherited storage classes.
+    /// Adds a backup entry which will be later returned by run().
+    /// These function can be called by implementations of IStorage::backupData() in inherited storage classes.
     void addBackupEntry(const String & file_name, BackupEntryPtr backup_entry);
+    void addBackupEntry(const std::pair<String, BackupEntryPtr> & backup_entry);
     void addBackupEntries(const BackupEntries & backup_entries_);
     void addBackupEntries(BackupEntries && backup_entries_);
 
-    /// Adds a function which must be called after all IStorage::backup() have finished their work on all hosts.
+    /// Adds a function which must be called after all IStorage::backupData() have finished their work on all hosts.
     /// This function is designed to help making a consistent in some complex cases like
     /// 1) we need to join (in a backup) the data of replicated tables gathered on different hosts.
-    void addPostCollectingTask(std::function<void()> task);
+    void addPostTask(std::function<void()> task);
 
-    /// Writing a backup includes a few stages:
-    enum class Stage
-    {
-        /// Initial stage.
-        kPreparing,
-
-        /// Finding all tables and databases which we're going to put to the backup.
-        kFindingTables,
-
-        /// Making temporary hard links and prepare backup entries.
-        kExtractingDataFromTables,
-
-        /// Running special tasks for replicated databases or tables which can also prepare some backup entries.
-        kRunningPostTasks,
-
-        /// Writing backup entries to the backup and removing temporary hard links.
-        kWritingBackup,
-
-        /// An error happens during any of the stages above, the backup won't be written.
-        kError,
-    };
-    static std::string_view toString(Stage stage);
-
-    /// Throws an exception that a specified table engine doesn't support partitions.
-    [[noreturn]] static void throwPartitionsNotSupported(const StorageID & storage_id, const String & table_engine);
+    /// Returns an incremental counter used to backup access control.
+    size_t getAccessCounter(AccessEntityType type);
 
 private:
-    void setStage(Stage new_stage, const String & error_message = {});
     void calculateRootPathInBackup();
-    void collectDatabasesAndTablesInfo();
-    void collectTableInfo(const QualifiedTableName & table_name, bool is_temporary_table, const std::optional<ASTs> & partitions, bool throw_if_not_found);
-    void collectDatabaseInfo(const String & database_name, const std::set<DatabaseAndTableName> & except_table_names, bool throw_if_not_found);
-    void collectAllDatabasesInfo(const std::set<String> & except_database_names, const std::set<DatabaseAndTableName> & except_table_names);
-    void checkConsistency();
+    void gatherMetadataAndCheckConsistency();
+    bool tryGatherMetadataAndCompareWithPrevious(std::optional<Exception> & inconsistency_error);
+
+    void gatherDatabasesMetadata();
+
+    void gatherDatabaseMetadata(
+        const String & database_name,
+        bool throw_if_database_not_found,
+        bool backup_create_database_query,
+        const std::optional<String> & table_name,
+        bool throw_if_table_not_found,
+        const std::optional<ASTs> & partitions,
+        bool all_tables,
+        const std::set<DatabaseAndTableName> & except_table_names);
+
+    void gatherTablesMetadata();
+    void lockTablesForReading();
+    bool compareWithPrevious(std::optional<Exception> & inconsistency_error);
 
     void makeBackupEntriesForDatabasesDefs();
     void makeBackupEntriesForTablesDefs();
     void makeBackupEntriesForTablesData();
-    void runPostCollectingTasks();
+    void runPostTasks();
 
+    Strings setStatus(const String & new_status, const String & message = "");
+
     const ASTBackupQuery::Elements backup_query_elements;
     const BackupSettings backup_settings;
     std::shared_ptr<IBackupCoordination> backup_coordination;
     ContextPtr context;
-    std::chrono::seconds timeout;
+    std::chrono::milliseconds consistent_metadata_snapshot_timeout;
     Poco::Logger * log;
 
-    Stage current_stage = Stage::kPreparing;
-    std::filesystem::path root_path_in_backup;
+    Strings all_hosts;
     DDLRenamingMap renaming_map;
+    std::filesystem::path root_path_in_backup;
 
     struct DatabaseInfo
     {
         DatabasePtr database;
         ASTPtr create_database_query;
+        String metadata_path_in_backup;
+
+        struct TableParams
+        {
+            bool throw_if_table_not_found = false;
+            std::optional<ASTs> partitions;
+        };
+
+        std::unordered_map<String, TableParams> tables;
+
+        bool all_tables = false;
+        std::unordered_set<String> except_table_names;
     };
 
     struct TableInfo
@@ -113,26 +120,22 @@ private:
         StoragePtr storage;
         TableLockHolder table_lock;
         ASTPtr create_table_query;
+        String metadata_path_in_backup;
         std::filesystem::path data_path_in_backup;
+        std::optional<String> replicated_table_shared_id;
         std::optional<ASTs> partitions;
     };
 
-    struct TableKey
-    {
-        QualifiedTableName name;
-        bool is_temporary = false;
-        bool operator ==(const TableKey & right) const;
-        bool operator <(const TableKey & right) const;
-    };
+    String current_status;
+    std::chrono::steady_clock::time_point consistent_metadata_snapshot_start_time;
 
     std::unordered_map<String, DatabaseInfo> database_infos;
-    std::map<TableKey, TableInfo> table_infos;
-    std::optional<std::set<String>> previous_database_names;
-    std::optional<std::set<TableKey>> previous_table_names;
-    bool consistent = false;
+    std::unordered_map<QualifiedTableName, TableInfo> table_infos;
+    std::vector<std::pair<String, String>> previous_databases_metadata;
+    std::vector<std::pair<QualifiedTableName, String>> previous_tables_metadata;
 
     BackupEntries backup_entries;
-    std::queue<std::function<void()>> post_collecting_tasks;
+    std::queue<std::function<void()>> post_tasks;
+    std::vector<size_t> access_counters;
 };
 
 }
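The pair previous_databases_metadata / previous_tables_metadata exists so the collector can detect concurrent DDL: it gathers all metadata, gathers it again, and only proceeds once two consecutive snapshots are identical, giving up after consistent_metadata_snapshot_timeout. A hedged sketch of that retry loop, inferred from the member and method names above rather than copied from the (suppressed) implementation diff:

    void BackupEntriesCollector::gatherMetadataAndCheckConsistency()
    {
        consistent_metadata_snapshot_start_time = std::chrono::steady_clock::now();
        auto deadline = consistent_metadata_snapshot_start_time + consistent_metadata_snapshot_timeout;

        for (;;)
        {
            std::optional<Exception> inconsistency_error;
            if (tryGatherMetadataAndCompareWithPrevious(inconsistency_error))
                return;   /// two snapshots in a row matched: the metadata is consistent
            if (std::chrono::steady_clock::now() > deadline)
                inconsistency_error->rethrow();
        }
    }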
@@ -39,7 +39,7 @@ DDLRenamingMap makeRenamingMapFromBackupQuery(const ASTBackupQuery::Elements & e
                 const String & new_table_name = element.new_table_name;
                 assert(!table_name.empty());
                 assert(!new_table_name.empty());
-                map.setNewTemporaryTableName(table_name, new_table_name);
+                map.setNewTableName({DatabaseCatalog::TEMPORARY_DATABASE, table_name}, {DatabaseCatalog::TEMPORARY_DATABASE, new_table_name});
                 break;
             }
 
@@ -166,9 +166,8 @@ UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & c
 
             BackupEntries backup_entries;
             {
-                auto timeout = std::chrono::seconds{context_in_use->getConfigRef().getInt("backups.backup_prepare_timeout", -1)};
-                BackupEntriesCollector backup_entries_collector{backup_query->elements, backup_settings, backup_coordination, context_in_use, timeout};
-                backup_entries = backup_entries_collector.getBackupEntries();
+                BackupEntriesCollector backup_entries_collector{backup_query->elements, backup_settings, backup_coordination, context_in_use};
+                backup_entries = backup_entries_collector.run();
             }
 
             writeBackupEntries(backup, std::move(backup_entries), backups_thread_pool);
@@ -272,8 +271,8 @@ UUID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr conte
             String addr_database = address->default_database.empty() ? current_database : address->default_database;
             for (auto & element : restore_elements)
                 element.setCurrentDatabase(addr_database);
-            RestorerFromBackup dummy_restorer{restore_elements, restore_settings, nullptr, backup, context_in_use, {}};
-            dummy_restorer.checkAccessOnly();
+            RestorerFromBackup dummy_restorer{restore_elements, restore_settings, nullptr, backup, context_in_use};
+            dummy_restorer.run(RestorerFromBackup::CHECK_ACCESS_ONLY);
         }
     }
 
@@ -325,11 +324,9 @@ UUID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr conte
 
             DataRestoreTasks data_restore_tasks;
             {
-                auto timeout = std::chrono::seconds{context_in_use->getConfigRef().getInt("backups.restore_metadata_timeout", -1)};
                 RestorerFromBackup restorer{restore_query->elements, restore_settings, restore_coordination,
-                                            backup, context_in_use, timeout};
-                restorer.restoreMetadata();
-                data_restore_tasks = restorer.getDataRestoreTasks();
+                                            backup, context_in_use};
+                data_restore_tasks = restorer.run(RestorerFromBackup::RESTORE);
             }
 
             restoreTablesData(std::move(data_restore_tasks), restores_thread_pool);
src/Backups/DDLAdjustingForBackupVisitor.cpp (new file, 113 lines)
@@ -0,0 +1,113 @@
+#include <Backups/DDLAdjustingForBackupVisitor.h>
+#include <Parsers/ASTCreateQuery.h>
+#include <Parsers/ASTFunction.h>
+#include <Parsers/ASTLiteral.h>
+#include <Interpreters/Context.h>
+#include <Storages/StorageReplicatedMergeTree.h>
+
+#include <Parsers/formatAST.h>
+
+
+namespace DB
+{
+
+namespace
+{
+    void visitStorageSystemTableEngine(ASTStorage &, const DDLAdjustingForBackupVisitor::Data & data)
+    {
+        /// Precondition: storage.engine && storage.engine->name.starts_with("System"))
+
+        /// If this is a definition of a system table we'll remove columns and comment because they're redundant for backups.
+        auto & create = data.create_query->as<ASTCreateQuery &>();
+        create.reset(create.columns_list);
+        create.reset(create.comment);
+    }
+
+    void visitStorageReplicatedTableEngine(ASTStorage & storage, const DDLAdjustingForBackupVisitor::Data & data)
+    {
+        /// Precondition: engine_name.starts_with("Replicated") && engine_name.ends_with("MergeTree")
+
+        if (data.replicated_table_shared_id)
+            *data.replicated_table_shared_id = StorageReplicatedMergeTree::tryGetTableSharedIDFromCreateQuery(*data.create_query, data.global_context);
+
+        /// Before storing the metadata in a backup we have to find a zookeeper path in its definition and turn the table's UUID in there
+        /// back into "{uuid}", and also we probably can remove the zookeeper path and replica name if they're default.
+        /// So we're kind of reverting what we had done to the table's definition in registerStorageMergeTree.cpp before we created this table.
+        auto & create = data.create_query->as<ASTCreateQuery &>();
+        auto & engine = *storage.engine;
+
+        auto * engine_args_ast = typeid_cast<ASTExpressionList *>(engine.arguments.get());
+        if (!engine_args_ast)
+            return;
+
+        auto & engine_args = engine_args_ast->children;
+        if (engine_args.size() < 2)
+            return;
+
+        auto * zookeeper_path_ast = typeid_cast<ASTLiteral *>(engine_args[0].get());
+        auto * replica_name_ast = typeid_cast<ASTLiteral *>(engine_args[1].get());
+        if (zookeeper_path_ast && (zookeeper_path_ast->value.getType() == Field::Types::String) &&
+            replica_name_ast && (replica_name_ast->value.getType() == Field::Types::String))
+        {
+            String & zookeeper_path_arg = zookeeper_path_ast->value.get<String>();
+            String & replica_name_arg = replica_name_ast->value.get<String>();
+            if (create.uuid != UUIDHelpers::Nil)
+            {
+                String table_uuid_str = toString(create.uuid);
+                if (size_t uuid_pos = zookeeper_path_arg.find(table_uuid_str); uuid_pos != String::npos)
+                    zookeeper_path_arg.replace(uuid_pos, table_uuid_str.size(), "{uuid}");
+            }
+            const auto & config = data.global_context->getConfigRef();
+            if ((zookeeper_path_arg == StorageReplicatedMergeTree::getDefaultZooKeeperPath(config))
+                && (replica_name_arg == StorageReplicatedMergeTree::getDefaultReplicaName(config))
+                && ((engine_args.size() == 2) || !engine_args[2]->as<ASTLiteral>()))
+            {
+                engine_args.erase(engine_args.begin(), engine_args.begin() + 2);
+            }
+        }
+    }
+
+    void visitStorage(ASTStorage & storage, const DDLAdjustingForBackupVisitor::Data & data)
+    {
+        if (!storage.engine)
+            return;
+
+        const String & engine_name = storage.engine->name;
+        if (engine_name.starts_with("System"))
+            visitStorageSystemTableEngine(storage, data);
+        else if (engine_name.starts_with("Replicated") && engine_name.ends_with("MergeTree"))
+            visitStorageReplicatedTableEngine(storage, data);
+    }
+
+    void visitCreateQuery(ASTCreateQuery & create, const DDLAdjustingForBackupVisitor::Data & data)
+    {
+        create.uuid = UUIDHelpers::Nil;
+        create.to_inner_uuid = UUIDHelpers::Nil;
+
+        if (create.storage)
+            visitStorage(*create.storage, data);
+    }
+}
+
+
+bool DDLAdjustingForBackupVisitor::needChildVisit(const ASTPtr &, const ASTPtr &)
+{
+    return false;
+}
+
+void DDLAdjustingForBackupVisitor::visit(ASTPtr ast, const Data & data)
+{
+    if (auto * create = ast->as<ASTCreateQuery>())
+        visitCreateQuery(*create, data);
+}
+
+void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context, std::optional<String> * replicated_table_shared_id)
+{
+    if (replicated_table_shared_id)
+        *replicated_table_shared_id = {};
+
+    DDLAdjustingForBackupVisitor::Data data{ast, global_context, replicated_table_shared_id};
+    DDLAdjustingForBackupVisitor::Visitor{data}.visit(ast);
+}
+
+}
src/Backups/DDLAdjustingForBackupVisitor.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+#pragma once
+
+#include <Interpreters/InDepthNodeVisitor.h>
+#include <memory>
+
+
+namespace DB
+{
+class IAST;
+using ASTPtr = std::shared_ptr<IAST>;
+class Context;
+using ContextPtr = std::shared_ptr<const Context>;
+
+/// Changes a create query to a form which is appropriate or suitable for saving in a backup.
+/// Also extracts a replicated table's shared ID from the create query if this is a create query for a replicated table.
+/// `replicated_table_shared_id` can be null if you don't need that.
+void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context, std::optional<String> * replicated_table_shared_id);
+
+/// Visits ASTCreateQuery and changes it to a form which is appropriate or suitable for saving in a backup.
+class DDLAdjustingForBackupVisitor
+{
+public:
+    struct Data
+    {
+        ASTPtr create_query;
+        ContextPtr global_context;
+        std::optional<String> * replicated_table_shared_id = nullptr;
+    };
+
+    using Visitor = InDepthNodeVisitor<DDLAdjustingForBackupVisitor, false>;
+
+    static bool needChildVisit(const ASTPtr & ast, const ASTPtr & child);
+    static void visit(ASTPtr ast, const Data & data);
+};
+
+}
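Typical use of the new visitor, sketched under the assumption that the caller already holds the table's CREATE query as an AST (the variable names are illustrative):

    ASTPtr create_query = table_info.create_table_query->clone();
    std::optional<String> shared_id;
    adjustCreateQueryForBackup(create_query, context->getGlobalContext(), &shared_id);
    /// create_query now has its UUIDs cleared and default ReplicatedMergeTree arguments
    /// stripped; shared_id identifies the replicated table, if it was one.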
@@ -13,11 +13,10 @@ class IBackupCoordination
 public:
     virtual ~IBackupCoordination() = default;
 
-    /// Sets the current stage and waits for other hosts to come to this stage too.
-    virtual void syncStage(const String & current_host, int stage, const Strings & wait_hosts, std::chrono::seconds timeout) = 0;
-    /// Sets that the current host encountered an error, so other hosts should know that and stop waiting in syncStage().
-    virtual void syncStageError(const String & current_host, const String & error_message) = 0;
+    /// Sets the current status and waits for other hosts to come to this status too. If status starts with "error:" it'll stop waiting on all the hosts.
+    virtual void setStatus(const String & current_host, const String & new_status, const String & message) = 0;
+    virtual Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & other_hosts) = 0;
+    virtual Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & other_hosts, UInt64 timeout_ms) = 0;
 
     struct PartNameAndChecksum
     {
@@ -29,21 +28,29 @@ public:
     /// Multiple replicas of the replicated table call this function and then the added part names can be returned by call of the function
     /// getReplicatedPartNames().
     /// Checksums are used only to control that parts under the same names on different replicas are the same.
-    virtual void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
+    virtual void addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name,
                                         const std::vector<PartNameAndChecksum> & part_names_and_checksums) = 0;
 
     /// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
     /// This is the same list as it was added by call of the function addReplicatedPartNames() but without duplications and without
     /// parts covered by another parts.
-    virtual Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const = 0;
+    virtual Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const = 0;
 
     /// Adds a data path in backup for a replicated table.
     /// Multiple replicas of the replicated table call this function and then all the added paths can be returned by call of the function
     /// getReplicatedDataPaths().
-    virtual void addReplicatedDataPath(const String & table_zk_path, const String & data_path) = 0;
+    virtual void addReplicatedDataPath(const String & table_shared_id, const String & data_path) = 0;
 
     /// Returns all the data paths in backup added for a replicated table (see also addReplicatedDataPath()).
-    virtual Strings getReplicatedDataPaths(const String & table_zk_path) const = 0;
+    virtual Strings getReplicatedDataPaths(const String & table_shared_id) const = 0;
+
+    /// Adds a path to access.txt file keeping access entities of a ReplicatedAccessStorage.
+    virtual void addReplicatedAccessPath(const String & access_zk_path, const String & file_path) = 0;
+    virtual Strings getReplicatedAccessPaths(const String & access_zk_path) const = 0;
+
+    /// Sets the host id of a host storing access entities of a ReplicatedAccessStorage to backup.
+    virtual void setReplicatedAccessHost(const String & access_zk_path, const String & host) = 0;
+    virtual String getReplicatedAccessHost(const String & access_zk_path) const = 0;
 
     struct FileInfo
     {
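The access-host pair amounts to a small election: every replica proposes its own host id, and only the host whose id ends up stored actually writes the access entities into the backup. A hedged sketch of the intended call pattern (the host id and the file path are placeholders):

    coordination->setReplicatedAccessHost(access_zk_path, my_host_id);
    if (coordination->getReplicatedAccessHost(access_zk_path) == my_host_id)
    {
        /// Only the elected host emits backup entries for users, roles, quotas, etc.
        coordination->addReplicatedAccessPath(access_zk_path, path_in_backup + "access.txt");
    }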
@@ -13,11 +13,10 @@ class IRestoreCoordination
 public:
     virtual ~IRestoreCoordination() = default;
 
-    /// Sets the current stage and waits for other hosts to come to this stage too.
-    virtual void syncStage(const String & current_host, int stage, const Strings & wait_hosts, std::chrono::seconds timeout) = 0;
-    /// Sets that the current host encountered an error, so other hosts should know that and stop waiting in syncStage().
-    virtual void syncStageError(const String & current_host, const String & error_message) = 0;
+    /// Sets the current status and waits for other hosts to come to this status too. If status starts with "error:" it'll stop waiting on all the hosts.
+    virtual void setStatus(const String & current_host, const String & new_status, const String & message) = 0;
+    virtual Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & other_hosts) = 0;
+    virtual Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & other_hosts, UInt64 timeout_ms) = 0;
 
     /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
     virtual bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) = 0;
@@ -9,7 +9,7 @@ namespace DB
 RestoreCoordinationDistributed::RestoreCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
     : zookeeper_path(zookeeper_path_)
     , get_zookeeper(get_zookeeper_)
-    , stage_sync(zookeeper_path_ + "/stage", get_zookeeper_, &Poco::Logger::get("RestoreCoordination"))
+    , status_sync(zookeeper_path_ + "/status", get_zookeeper_, &Poco::Logger::get("RestoreCoordination"))
 {
     createRootNodes();
 }
@@ -26,14 +26,19 @@ void RestoreCoordinationDistributed::createRootNodes()
     zookeeper->createIfNotExists(zookeeper_path + "/repl_access_storages_acquired", "");
 }
 
-void RestoreCoordinationDistributed::syncStage(const String & current_host, int new_stage, const Strings & wait_hosts, std::chrono::seconds timeout)
+void RestoreCoordinationDistributed::setStatus(const String & current_host, const String & new_status, const String & message)
 {
-    stage_sync.syncStage(current_host, new_stage, wait_hosts, timeout);
+    status_sync.set(current_host, new_status, message);
 }
 
-void RestoreCoordinationDistributed::syncStageError(const String & current_host, const String & error_message)
+Strings RestoreCoordinationDistributed::setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts)
 {
-    stage_sync.syncStageError(current_host, error_message);
+    return status_sync.setAndWait(current_host, new_status, message, all_hosts);
+}
+
+Strings RestoreCoordinationDistributed::setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms)
+{
+    return status_sync.setAndWaitFor(current_host, new_status, message, all_hosts, timeout_ms);
 }
 
 bool RestoreCoordinationDistributed::acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name)
@@ -14,11 +14,10 @@ public:
     RestoreCoordinationDistributed(const String & zookeeper_path, zkutil::GetZooKeeper get_zookeeper);
     ~RestoreCoordinationDistributed() override;
 
-    /// Sets the current stage and waits for other hosts to come to this stage too.
-    void syncStage(const String & current_host, int new_stage, const Strings & wait_hosts, std::chrono::seconds timeout) override;
-    /// Sets that the current host encountered an error, so other hosts should know that and stop waiting in syncStage().
-    void syncStageError(const String & current_host, const String & error_message) override;
+    /// Sets the current status and waits for other hosts to come to this status too. If status starts with "error:" it'll stop waiting on all the hosts.
+    void setStatus(const String & current_host, const String & new_status, const String & message) override;
+    Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts) override;
+    Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms) override;
 
     /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
     bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override;
@@ -42,7 +41,7 @@ private:
 
     const String zookeeper_path;
     const zkutil::GetZooKeeper get_zookeeper;
-    BackupCoordinationStageSync stage_sync;
+    BackupCoordinationStatusSync status_sync;
 };
 
 }
@@ -7,12 +7,18 @@ namespace DB
 RestoreCoordinationLocal::RestoreCoordinationLocal() = default;
 RestoreCoordinationLocal::~RestoreCoordinationLocal() = default;
 
-void RestoreCoordinationLocal::syncStage(const String &, int, const Strings &, std::chrono::seconds)
+void RestoreCoordinationLocal::setStatus(const String &, const String &, const String &)
 {
 }
 
-void RestoreCoordinationLocal::syncStageError(const String &, const String &)
+Strings RestoreCoordinationLocal::setStatusAndWait(const String &, const String &, const String &, const Strings &)
 {
+    return {};
+}
+
+Strings RestoreCoordinationLocal::setStatusAndWaitFor(const String &, const String &, const String &, const Strings &, UInt64)
+{
+    return {};
 }
 
 bool RestoreCoordinationLocal::acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name)
@@ -17,11 +17,10 @@ public:
     RestoreCoordinationLocal();
     ~RestoreCoordinationLocal() override;
 
-    /// Sets the current stage and waits for other hosts to come to this stage too.
-    void syncStage(const String & current_host, int stage, const Strings & wait_hosts, std::chrono::seconds timeout) override;
-    /// Sets that the current host encountered an error, so other hosts should know that and stop waiting in syncStage().
-    void syncStageError(const String & current_host, const String & error_message) override;
+    /// Sets the current status and waits for other hosts to come to this status too. If status starts with "error:" it'll stop waiting on all the hosts.
+    void setStatus(const String & current_host, const String & new_status, const String & message) override;
+    Strings setStatusAndWait(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts) override;
+    Strings setStatusAndWaitFor(const String & current_host, const String & new_status, const String & message, const Strings & all_hosts, UInt64 timeout_ms) override;
 
     /// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
     bool acquireCreatingTableInReplicatedDatabase(const String & database_zk_path, const String & table_name) override;
@@ -74,7 +74,7 @@ namespace
         {
             case RestoreTableCreationMode::kCreate: return Field{true};
             case RestoreTableCreationMode::kMustExist: return Field{false};
-            case RestoreTableCreationMode::kCreateIfNotExists: return Field{"if not exists"};
+            case RestoreTableCreationMode::kCreateIfNotExists: return Field{"if-not-exists"};
         }
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected value of enum RestoreTableCreationMode: {}", static_cast<int>(value));
     }
@@ -131,12 +131,14 @@ namespace
             switch (value)
             {
                 case RestoreAccessCreationMode::kCreate: return Field{true};
-                case RestoreAccessCreationMode::kCreateIfNotExists: return Field{"if not exists"};
+                case RestoreAccessCreationMode::kCreateIfNotExists: return Field{"if-not-exists"};
                 case RestoreAccessCreationMode::kReplace: return Field{"replace"};
             }
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected value of enum RestoreAccessCreationMode: {}", static_cast<int>(value));
         }
     };
 
+    using SettingFieldRestoreUDFCreationMode = SettingFieldRestoreAccessCreationMode;
 }
 
 /// List of restore settings except base_backup_name and cluster_host_ids.
@@ -155,6 +157,7 @@ namespace
     M(Bool, allow_non_empty_tables) \
     M(RestoreAccessCreationMode, create_access) \
     M(Bool, allow_unresolved_access_dependencies) \
+    M(RestoreUDFCreationMode, create_function) \
     M(Bool, internal) \
     M(String, host_id) \
     M(String, coordination_zk_path)
@@ -36,6 +36,8 @@ enum class RestoreAccessCreationMode
     kReplace,
 };
 
+using RestoreUDFCreationMode = RestoreAccessCreationMode;
+
 /// Settings specified in the "SETTINGS" clause of a RESTORE query.
 struct RestoreSettings
 {
@@ -99,6 +101,9 @@ struct RestoreSettings
     /// For example, if an user has a profile assigned and that profile is not in the backup and doesn't exist locally.
     bool allow_unresolved_access_dependencies = false;
 
+    /// How the RESTORE command will handle if a user-defined function which it's going to restore already exists.
+    RestoreUDFCreationMode create_function = RestoreUDFCreationMode::kCreateIfNotExists;
+
     /// Internal, should not be specified by user.
     bool internal = false;
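Together these hunks add one user-visible knob: a create_function restore setting with the same three modes as create_access. A hedged sketch of how the restoring code would consult it (the surrounding switch is illustrative, not part of this diff):

    switch (restore_settings.create_function)
    {
        case RestoreUDFCreationMode::kCreate:            /* CREATE FUNCTION, fail if it already exists */ break;
        case RestoreUDFCreationMode::kCreateIfNotExists: /* keep the existing function */ break;
        case RestoreUDFCreationMode::kReplace:           /* CREATE OR REPLACE FUNCTION */ break;
    }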
@@ -1,9 +1,11 @@
 #include <Backups/RestorerFromBackup.h>
 #include <Backups/IRestoreCoordination.h>
+#include <Backups/BackupCoordinationHelpers.h>
 #include <Backups/BackupSettings.h>
 #include <Backups/IBackup.h>
 #include <Backups/IBackupEntry.h>
 #include <Backups/BackupUtils.h>
+#include <Backups/DDLAdjustingForBackupVisitor.h>
 #include <Access/AccessBackup.h>
 #include <Access/AccessRights.h>
 #include <Parsers/ParserCreateQuery.h>
@@ -39,54 +41,58 @@ namespace ErrorCodes
 
 namespace
 {
-    constexpr const std::string_view sql_ext = ".sql";
+    /// Finding databases and tables in the backup which we're going to restore.
+    constexpr const char * kFindingTablesInBackupStatus = "finding tables in backup";
 
-    String tryGetTableEngine(const IAST & ast)
+    /// Creating databases or finding them and checking their definitions.
+    constexpr const char * kCreatingDatabasesStatus = "creating databases";
+
+    /// Creating tables or finding them and checking their definition.
+    constexpr const char * kCreatingTablesStatus = "creating tables";
+
+    /// Inserting restored data to tables.
+    constexpr const char * kInsertingDataToTablesStatus = "inserting data to tables";
+
+    /// Error status.
+    constexpr const char * kErrorStatus = BackupCoordinationStatusSync::kErrorStatus;
+
+    /// Uppercases the first character of a passed string.
+    String toUpperFirst(const String & str)
     {
-        const ASTCreateQuery * create = ast.as<ASTCreateQuery>();
-        if (!create)
-            return {};
-        if (!create->storage || !create->storage->engine)
-            return {};
-        return create->storage->engine->name;
+        String res = str;
+        res[0] = std::toupper(res[0]);
+        return res;
     }
 
-    bool hasSystemTableEngine(const IAST & ast)
+    /// Outputs "table <name>" or "temporary table <name>"
+    String tableNameWithTypeToString(const String & database_name, const String & table_name, bool first_upper)
     {
-        return tryGetTableEngine(ast).starts_with("System");
+        String str;
+        if (database_name == DatabaseCatalog::TEMPORARY_DATABASE)
+            str = fmt::format("temporary table {}", backQuoteIfNeed(table_name));
+        else
+            str = fmt::format("table {}.{}", backQuoteIfNeed(database_name), backQuoteIfNeed(table_name));
+        if (first_upper)
+            str[0] = std::toupper(str[0]);
+        return str;
     }
 
-    bool hasSystemAccessTableEngine(const IAST & ast)
+    /// Whether a specified name corresponds one of the tables backuping ACL.
+    bool isSystemAccessTableName(const QualifiedTableName & table_name)
     {
-        String engine_name = tryGetTableEngine(ast);
-        return (engine_name == "SystemUsers") || (engine_name == "SystemRoles") || (engine_name == "SystemSettingsProfiles")
-            || (engine_name == "SystemRowPolicies") || (engine_name == "SystemQuotas");
+        if (table_name.database != DatabaseCatalog::SYSTEM_DATABASE)
+            return false;
+
+        return (table_name.table == "users") || (table_name.table == "roles") || (table_name.table == "settings_profiles")
+            || (table_name.table == "row_policies") || (table_name.table == "quotas");
     }
-}
 
-bool RestorerFromBackup::TableKey::operator ==(const TableKey & right) const
-{
-    return (name == right.name) && (is_temporary == right.is_temporary);
-}
-
-bool RestorerFromBackup::TableKey::operator <(const TableKey & right) const
-{
-    return (name < right.name) || ((name == right.name) && (is_temporary < right.is_temporary));
-}
-
-std::string_view RestorerFromBackup::toString(Stage stage)
-{
-    switch (stage)
+    /// Whether a specified name corresponds one of the tables backuping ACL.
+    bool isSystemFunctionsTableName(const QualifiedTableName & table_name)
     {
-        case Stage::kPreparing: return "Preparing";
-        case Stage::kFindingTablesInBackup: return "Finding tables in backup";
-        case Stage::kCreatingDatabases: return "Creating databases";
-        case Stage::kCreatingTables: return "Creating tables";
-        case Stage::kInsertingDataToTables: return "Inserting data to tables";
-        case Stage::kError: return "Error";
+        return (table_name.database == DatabaseCatalog::SYSTEM_DATABASE) && (table_name.table == "functions");
     }
-    throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown restore stage: {}", static_cast<int>(stage));
-}
+}
 
 
@@ -94,71 +100,66 @@ RestorerFromBackup::RestorerFromBackup(
     const RestoreSettings & restore_settings_,
     std::shared_ptr<IRestoreCoordination> restore_coordination_,
     const BackupPtr & backup_,
-    const ContextMutablePtr & context_,
-    std::chrono::seconds timeout_)
+    const ContextMutablePtr & context_)
     : restore_query_elements(restore_query_elements_)
     , restore_settings(restore_settings_)
     , restore_coordination(restore_coordination_)
     , backup(backup_)
     , context(context_)
-    , timeout(timeout_)
+    , create_table_timeout(context->getConfigRef().getUInt64("backups.create_table_timeout", 300000))
     , log(&Poco::Logger::get("RestorerFromBackup"))
 {
 }

 RestorerFromBackup::~RestorerFromBackup() = default;

-void RestorerFromBackup::restoreMetadata()
-{
-    run(/* only_check_access= */ false);
-}
-
-void RestorerFromBackup::checkAccessOnly()
-{
-    run(/* only_check_access= */ true);
-}
-
-void RestorerFromBackup::run(bool only_check_access)
+RestorerFromBackup::DataRestoreTasks RestorerFromBackup::run(Mode mode)
 {
     try
     {
-        /// restoreMetadata() must not be called multiple times.
-        if (current_stage != Stage::kPreparing)
+        /// run() can be called only once.
+        if (!current_status.empty())
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Already restoring");

-        /// Calculate the root path in the backup for restoring, it's either empty or has the format "shards/<shard_num>/replicas/<replica_num>/".
-        findRootPathsInBackup();
+        /// Find other hosts working along with us to execute this ON CLUSTER query.
+        all_hosts = BackupSettings::Util::filterHostIDs(
+            restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num);

         /// Do renaming in the create queries according to the renaming config.
         renaming_map = makeRenamingMapFromBackupQuery(restore_query_elements);

+        /// Calculate the root path in the backup for restoring, it's either empty or has the format "shards/<shard_num>/replicas/<replica_num>/".
+        findRootPathsInBackup();
+
         /// Find all the databases and tables which we will read from the backup.
-        setStage(Stage::kFindingTablesInBackup);
-        collectDatabaseAndTableInfos();
+        setStatus(kFindingTablesInBackupStatus);
+        findDatabasesAndTablesInBackup();

         /// Check access rights.
-        checkAccessForCollectedInfos();
-        if (only_check_access)
-            return;
+        checkAccessForObjectsFoundInBackup();
+        if (mode == Mode::CHECK_ACCESS_ONLY)
+            return {};

         /// Create databases using the create queries read from the backup.
-        setStage(Stage::kCreatingDatabases);
+        setStatus(kCreatingDatabasesStatus);
         createDatabases();

         /// Create tables using the create queries read from the backup.
-        setStage(Stage::kCreatingTables);
+        setStatus(kCreatingTablesStatus);
         createTables();

         /// All what's left is to insert data to tables.
         /// No more data restoring tasks are allowed after this point.
-        setStage(Stage::kInsertingDataToTables);
+        setStatus(kInsertingDataToTablesStatus);
+        return getDataRestoreTasks();
     }
     catch (...)
     {
         try
         {
             /// Other hosts should know that we've encountered an error.
-            setStage(Stage::kError, getCurrentExceptionMessage(false));
+            setStatus(kErrorStatus, getCurrentExceptionMessage(false));
         }
         catch (...)
         {
@@ -167,59 +168,20 @@ void RestorerFromBackup::run(bool only_check_access)
         }
     }

-RestorerFromBackup::DataRestoreTasks RestorerFromBackup::getDataRestoreTasks()
-{
-    if (current_stage != Stage::kInsertingDataToTables)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Metadata wasn't restored");
-
-    if (data_restore_tasks.empty() && !access_restore_task)
-        return {};
-
-    LOG_TRACE(log, "Will insert data to tables");
-
-    /// Storages and table locks must exist while we're executing data restoring tasks.
-    auto storages = std::make_shared<std::vector<StoragePtr>>();
-    auto table_locks = std::make_shared<std::vector<TableLockHolder>>();
-    storages->reserve(table_infos.size());
-    table_locks->reserve(table_infos.size());
-    for (const auto & table_info : table_infos | boost::adaptors::map_values)
-    {
-        storages->push_back(table_info.storage);
-        table_locks->push_back(table_info.table_lock);
-    }
-
-    DataRestoreTasks res_tasks;
-    for (const auto & task : data_restore_tasks)
-        res_tasks.push_back([task, storages, table_locks] { task(); });
-
-    if (access_restore_task)
-        res_tasks.push_back([task = access_restore_task, access_control = &context->getAccessControl()] { task->restore(*access_control); });
-
-    return res_tasks;
-}
-
-void RestorerFromBackup::setStage(Stage new_stage, const String & error_message)
-{
-    if (new_stage == Stage::kError)
-        LOG_ERROR(log, "{} failed with error: {}", toString(current_stage), error_message);
-    else
-        LOG_TRACE(log, "{}", toString(new_stage));
-
-    current_stage = new_stage;
-
-    if (!restore_coordination)
-        return;
-
-    if (new_stage == Stage::kError)
-    {
-        restore_coordination->syncStageError(restore_settings.host_id, error_message);
-    }
-    else
-    {
-        auto all_hosts
-            = BackupSettings::Util::filterHostIDs(restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num);
-        restore_coordination->syncStage(restore_settings.host_id, static_cast<int>(new_stage), all_hosts, timeout);
-    }
-}
+void RestorerFromBackup::setStatus(const String & new_status, const String & message)
+{
+    if (new_status == kErrorStatus)
+    {
+        LOG_ERROR(log, "{} failed with {}", toUpperFirst(current_status), message);
+        if (restore_coordination)
+            restore_coordination->setStatus(restore_settings.host_id, new_status, message);
+    }
+    else
+    {
+        LOG_TRACE(log, "{}", toUpperFirst(new_status));
+        current_status = new_status;
+        if (restore_coordination)
+            restore_coordination->setStatusAndWait(restore_settings.host_id, new_status, message, all_hosts);
+    }
+}

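Taken together, `run(Mode)` now drives the whole restore in one call, and `setStatus()` both logs progress and, when a coordination object is present, publishes it so the other hosts of an ON CLUSTER restore can wait on it. A compressed standalone sketch of that control flow (hypothetical names and a trivial status store, not the real API):

```cpp
#include <functional>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Sketch only: one run(Mode) entry point replaces the old restoreMetadata() /
// checkAccessOnly() pair and returns the deferred data-restoring tasks.
class Restorer
{
public:
    enum class Mode { RESTORE, CHECK_ACCESS_ONLY };
    using Task = std::function<void()>;

    std::vector<Task> run(Mode mode)
    {
        if (!current_status.empty())
            throw std::logic_error("Already restoring");  // run() may be called only once

        current_status = "finding tables in backup";
        findTablesInBackup();
        checkAccessRights();
        if (mode == Mode::CHECK_ACCESS_ONLY)
            return {};            // nothing created, nothing to insert

        current_status = "creating databases";
        createDatabases();
        current_status = "creating tables";
        createTables();
        current_status = "inserting data to tables";
        return std::move(tasks);  // caller executes these, possibly on a pool
    }

private:
    void findTablesInBackup() {}
    void checkAccessRights() {}
    void createDatabases() {}
    void createTables() {}

    std::string current_status;  // also what setStatus() would broadcast
    std::vector<Task> tasks;
};
```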
@@ -302,7 +264,7 @@ void RestorerFromBackup::findRootPathsInBackup()
             ", "));
 }

-void RestorerFromBackup::collectDatabaseAndTableInfos()
+void RestorerFromBackup::findDatabasesAndTablesInBackup()
 {
     database_infos.clear();
     table_infos.clear();
@@ -312,22 +274,22 @@ void RestorerFromBackup::collectDatabaseAndTableInfos()
         {
             case ASTBackupQuery::ElementType::TABLE:
             {
-                collectTableInfo({element.database_name, element.table_name}, false, element.partitions);
+                findTableInBackup({element.database_name, element.table_name}, element.partitions);
                 break;
             }
             case ASTBackupQuery::ElementType::TEMPORARY_TABLE:
             {
-                collectTableInfo({element.database_name, element.table_name}, true, element.partitions);
+                findTableInBackup({DatabaseCatalog::TEMPORARY_DATABASE, element.table_name}, element.partitions);
                 break;
             }
             case ASTBackupQuery::ElementType::DATABASE:
             {
-                collectDatabaseInfo(element.database_name, element.except_tables, /* throw_if_no_database_metadata_in_backup= */ true);
+                findDatabaseInBackup(element.database_name, element.except_tables);
                 break;
             }
             case ASTBackupQuery::ElementType::ALL:
             {
-                collectAllDatabasesInfo(element.except_databases, element.except_tables);
+                findEverythingInBackup(element.except_databases, element.except_tables);
                 break;
             }
         }
@@ -336,9 +298,9 @@ void RestorerFromBackup::collectDatabaseAndTableInfos()
     LOG_INFO(log, "Will restore {} databases and {} tables", database_infos.size(), table_infos.size());
 }

-void RestorerFromBackup::collectTableInfo(const QualifiedTableName & table_name_in_backup, bool is_temporary_table, const std::optional<ASTs> & partitions)
+void RestorerFromBackup::findTableInBackup(const QualifiedTableName & table_name_in_backup, const std::optional<ASTs> & partitions)
 {
-    String database_name_in_backup = is_temporary_table ? DatabaseCatalog::TEMPORARY_DATABASE : table_name_in_backup.database;
+    bool is_temporary_table = (table_name_in_backup.database == DatabaseCatalog::TEMPORARY_DATABASE);

     std::optional<fs::path> metadata_path;
     std::optional<fs::path> root_path_in_use;
@@ -365,21 +327,20 @@ void RestorerFromBackup::collectTableInfo(const QualifiedTableName & table_name_
     }

     if (!metadata_path)
-        throw Exception(ErrorCodes::BACKUP_ENTRY_NOT_FOUND, "Table {} not found in backup", table_name_in_backup.getFullName());
+        throw Exception(
+            ErrorCodes::BACKUP_ENTRY_NOT_FOUND,
+            "{} not found in backup",
+            tableNameWithTypeToString(table_name_in_backup.database, table_name_in_backup.table, true));

-    TableKey table_key;
     fs::path data_path_in_backup;
     if (is_temporary_table)
     {
         data_path_in_backup = *root_path_in_use / "temporary_tables" / "data" / escapeForFileName(table_name_in_backup.table);
-        table_key.name.table = renaming_map.getNewTemporaryTableName(table_name_in_backup.table);
-        table_key.is_temporary = true;
     }
     else
     {
         data_path_in_backup
             = *root_path_in_use / "data" / escapeForFileName(table_name_in_backup.database) / escapeForFileName(table_name_in_backup.table);
-        table_key.name = renaming_map.getNewTableName(table_name_in_backup);
     }

     auto read_buffer = backup->readFile(*metadata_path)->getReadBuffer();
@@ -388,27 +349,30 @@ void RestorerFromBackup::collectTableInfo(const QualifiedTableName & table_name_
     read_buffer.reset();
     ParserCreateQuery create_parser;
     ASTPtr create_table_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
-    renameDatabaseAndTableNameInCreateQuery(context->getGlobalContext(), renaming_map, create_table_query);
+    renameDatabaseAndTableNameInCreateQuery(create_table_query, renaming_map, context->getGlobalContext());

-    if (auto it = table_infos.find(table_key); it != table_infos.end())
+    QualifiedTableName table_name = renaming_map.getNewTableName(table_name_in_backup);
+
+    if (auto it = table_infos.find(table_name); it != table_infos.end())
     {
         const TableInfo & table_info = it->second;
         if (table_info.create_table_query && (serializeAST(*table_info.create_table_query) != serializeAST(*create_table_query)))
         {
             throw Exception(
                 ErrorCodes::CANNOT_RESTORE_TABLE,
-                "Extracted two different create queries for the same {}table {}: {} and {}",
-                (is_temporary_table ? "temporary " : ""),
-                table_key.name.getFullName(),
+                "Extracted two different create queries for the same {}: {} and {}",
+                tableNameWithTypeToString(table_name.database, table_name.table, false),
                 serializeAST(*table_info.create_table_query),
                 serializeAST(*create_table_query));
         }
     }

-    TableInfo & res_table_info = table_infos[table_key];
+    TableInfo & res_table_info = table_infos[table_name];
     res_table_info.create_table_query = create_table_query;
+    res_table_info.is_predefined_table = DatabaseCatalog::instance().isPredefinedTable(StorageID{table_name.database, table_name.table});
+    res_table_info.dependencies = getDependenciesSetFromCreateQuery(context->getGlobalContext(), table_name, create_table_query);
+    res_table_info.has_data = backup->hasFiles(data_path_in_backup);
     res_table_info.data_path_in_backup = data_path_in_backup;
-    res_table_info.dependencies = getDependenciesSetFromCreateQuery(context->getGlobalContext(), table_key.name, create_table_query);

     if (partitions)
     {
@@ -417,35 +381,45 @@ void RestorerFromBackup::collectTableInfo(const QualifiedTableName & table_name_
         insertAtEnd(*res_table_info.partitions, *partitions);
     }

-    if (hasSystemAccessTableEngine(*create_table_query))
+    if (!restore_settings.structure_only && isSystemAccessTableName(table_name))
     {
-        if (!access_restore_task)
-            access_restore_task = std::make_shared<AccessRestoreTask>(backup, restore_settings, restore_coordination);
-        access_restore_task->addDataPath(data_path_in_backup);
+        if (!access_restorer)
+            access_restorer = std::make_unique<AccessRestorerFromBackup>(backup, restore_settings);
+        access_restorer->addDataPath(data_path_in_backup, table_name);
     }
 }

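The paths probed above imply a backup layout in which regular tables live under `metadata/` and `data/` while temporary tables get their own `temporary_tables/` subtree. A small sketch of that layout as inferred from the strings in this hunk (the layout is an assumption drawn from the diff, not a documented format; file-name escaping is omitted):

```cpp
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Inferred layout:
//   <root>/metadata/<db>/<table>.sql              regular table definition
//   <root>/data/<db>/<table>/                     regular table data
//   <root>/temporary_tables/metadata/<table>.sql  temporary table definition
//   <root>/temporary_tables/data/<table>/         temporary table data
fs::path dataPathInBackup(const fs::path & root, const std::string & database,
                          const std::string & table, bool is_temporary)
{
    if (is_temporary)
        return root / "temporary_tables" / "data" / table;
    return root / "data" / database / table;
}

int main()
{
    std::cout << dataPathInBackup("shards/1/replicas/1", "shop", "orders", false) << '\n';
    // prints "shards/1/replicas/1/data/shop/orders"
}
```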
-void RestorerFromBackup::collectDatabaseInfo(const String & database_name_in_backup, const std::set<DatabaseAndTableName> & except_table_names, bool throw_if_no_database_metadata_in_backup)
+void RestorerFromBackup::findDatabaseInBackup(const String & database_name_in_backup, const std::set<DatabaseAndTableName> & except_table_names)
 {
     std::optional<fs::path> metadata_path;
     std::unordered_set<String> table_names_in_backup;
     for (const auto & root_path_in_backup : root_paths_in_backup)
     {
-        fs::path try_metadata_path = root_path_in_backup / "metadata" / (escapeForFileName(database_name_in_backup) + ".sql");
-        if (!metadata_path && backup->fileExists(try_metadata_path))
+        fs::path try_metadata_path, try_tables_metadata_path;
+        if (database_name_in_backup == DatabaseCatalog::TEMPORARY_DATABASE)
+        {
+            try_tables_metadata_path = root_path_in_backup / "temporary_tables" / "metadata";
+        }
+        else
+        {
+            try_metadata_path = root_path_in_backup / "metadata" / (escapeForFileName(database_name_in_backup) + ".sql");
+            try_tables_metadata_path = root_path_in_backup / "metadata" / escapeForFileName(database_name_in_backup);
+        }
+
+        if (!metadata_path && !try_metadata_path.empty() && backup->fileExists(try_metadata_path))
             metadata_path = try_metadata_path;

-        Strings file_names = backup->listFiles(root_path_in_backup / "metadata" / escapeForFileName(database_name_in_backup));
+        Strings file_names = backup->listFiles(try_tables_metadata_path);
         for (const String & file_name : file_names)
         {
-            if (!file_name.ends_with(sql_ext))
+            if (!file_name.ends_with(".sql"))
                 continue;
-            String file_name_without_ext = file_name.substr(0, file_name.length() - sql_ext.length());
+            String file_name_without_ext = file_name.substr(0, file_name.length() - strlen(".sql"));
             table_names_in_backup.insert(unescapeForFileName(file_name_without_ext));
         }
     }

-    if (!metadata_path && throw_if_no_database_metadata_in_backup)
+    if (!metadata_path && table_names_in_backup.empty())
         throw Exception(ErrorCodes::BACKUP_ENTRY_NOT_FOUND, "Database {} not found in backup", backQuoteIfNeed(database_name_in_backup));

     if (metadata_path)
@@ -456,7 +430,7 @@ void RestorerFromBackup::collectDatabaseInfo(const String & database_name_in_bac
         read_buffer.reset();
         ParserCreateQuery create_parser;
         ASTPtr create_database_query = parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
-        renameDatabaseAndTableNameInCreateQuery(context->getGlobalContext(), renaming_map, create_database_query);
+        renameDatabaseAndTableNameInCreateQuery(create_database_query, renaming_map, context->getGlobalContext());

         String database_name = renaming_map.getNewDatabaseName(database_name_in_backup);
         DatabaseInfo & database_info = database_infos[database_name];
@@ -472,6 +446,7 @@ void RestorerFromBackup::collectDatabaseInfo(const String & database_name_in_bac
         }

         database_info.create_database_query = create_database_query;
+        database_info.is_predefined_database = DatabaseCatalog::isPredefinedDatabase(database_name);
     }

     for (const String & table_name_in_backup : table_names_in_backup)
@@ -479,33 +454,26 @@ void RestorerFromBackup::collectDatabaseInfo(const String & database_name_in_bac
         if (except_table_names.contains({database_name_in_backup, table_name_in_backup}))
             continue;

-        collectTableInfo({database_name_in_backup, table_name_in_backup}, /* is_temporary_table= */ false, /* partitions= */ {});
+        findTableInBackup({database_name_in_backup, table_name_in_backup}, /* partitions= */ {});
     }
 }

-void RestorerFromBackup::collectAllDatabasesInfo(const std::set<String> & except_database_names, const std::set<DatabaseAndTableName> & except_table_names)
+void RestorerFromBackup::findEverythingInBackup(const std::set<String> & except_database_names, const std::set<DatabaseAndTableName> & except_table_names)
 {
     std::unordered_set<String> database_names_in_backup;
-    std::unordered_set<String> temporary_table_names_in_backup;
-
     for (const auto & root_path_in_backup : root_paths_in_backup)
     {
         Strings file_names = backup->listFiles(root_path_in_backup / "metadata");
         for (String & file_name : file_names)
         {
-            if (file_name.ends_with(sql_ext))
-                file_name.resize(file_name.length() - sql_ext.length());
+            if (file_name.ends_with(".sql"))
+                file_name.resize(file_name.length() - strlen(".sql"));
             database_names_in_backup.emplace(unescapeForFileName(file_name));
         }

-        file_names = backup->listFiles(root_path_in_backup / "temporary_tables" / "metadata");
-        for (String & file_name : file_names)
-        {
-            if (!file_name.ends_with(sql_ext))
-                continue;
-            file_name.resize(file_name.length() - sql_ext.length());
-            temporary_table_names_in_backup.emplace(unescapeForFileName(file_name));
-        }
+        if (backup->hasFiles(root_path_in_backup / "temporary_tables" / "metadata"))
+            database_names_in_backup.emplace(DatabaseCatalog::TEMPORARY_DATABASE);
     }

     for (const String & database_name_in_backup : database_names_in_backup)
@@ -513,19 +481,16 @@ void RestorerFromBackup::collectAllDatabasesInfo(const std::set<String> & except
         if (except_database_names.contains(database_name_in_backup))
             continue;

-        collectDatabaseInfo(database_name_in_backup, except_table_names, /* throw_if_no_database_metadata_in_backup= */ false);
+        findDatabaseInBackup(database_name_in_backup, except_table_names);
     }
-
-    for (const String & temporary_table_name_in_backup : temporary_table_names_in_backup)
-        collectTableInfo({"", temporary_table_name_in_backup}, /* is_temporary_table= */ true, /* partitions= */ {});
 }

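Database discovery in `findEverythingInBackup()` boils down to listing `metadata/`, keeping the `.sql` entries, and stripping the extension, as in this standalone sketch (unescaping omitted; names are illustrative):

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Metadata file names like "my_db.sql" become database names; the suffix
// handling mirrors the strlen(".sql") arithmetic in the hunk above.
std::set<std::string> databaseNamesFromFiles(const std::vector<std::string> & file_names)
{
    std::set<std::string> names;
    for (std::string file_name : file_names)
    {
        const std::string ext = ".sql";
        if (!file_name.ends_with(ext))
            continue;
        file_name.resize(file_name.size() - ext.size());
        names.insert(file_name);
    }
    return names;
}

int main()
{
    for (const auto & name : databaseNamesFromFiles({"shop.sql", "logs.sql", "readme.txt"}))
        std::cout << name << '\n';  // logs, shop
}
```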
-void RestorerFromBackup::checkAccessForCollectedInfos() const
+void RestorerFromBackup::checkAccessForObjectsFoundInBackup() const
 {
     AccessRightsElements required_access;
-    for (const auto & database_name : database_infos | boost::adaptors::map_keys)
+    for (const auto & [database_name, database_info] : database_infos)
     {
-        if (DatabaseCatalog::isPredefinedDatabaseName(database_name))
+        if (database_info.is_predefined_database)
             continue;

         AccessFlags flags;
@@ -541,10 +506,20 @@ void RestorerFromBackup::checkAccessForCollectedInfos() const

     for (const auto & [table_name, table_info] : table_infos)
     {
-        if (hasSystemTableEngine(*table_info.create_table_query))
+        if (table_info.is_predefined_table)
+        {
+            if (isSystemFunctionsTableName(table_name))
+            {
+                /// CREATE_FUNCTION privilege is required to restore the "system.functions" table.
+                if (!restore_settings.structure_only && table_info.has_data)
+                    required_access.emplace_back(AccessType::CREATE_FUNCTION);
+            }
+            /// Privileges required to restore ACL system tables are checked separately
+            /// (see access_restore_task->getRequiredAccess() below).
             continue;
+        }

-        if (table_name.is_temporary)
+        if (table_name.database == DatabaseCatalog::TEMPORARY_DATABASE)
         {
             if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
                 required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE);
@@ -564,8 +539,7 @@ void RestorerFromBackup::checkAccessForCollectedInfos() const
             flags |= AccessType::CREATE_TABLE;
         }

-        if (!restore_settings.structure_only && !create.is_dictionary && !create.is_ordinary_view
-            && backup->hasFiles(table_info.data_path_in_backup))
+        if (!restore_settings.structure_only && table_info.has_data)
         {
             flags |= AccessType::INSERT;
         }
@@ -578,11 +552,11 @@ void RestorerFromBackup::checkAccessForCollectedInfos() const
             flags = AccessType::SHOW_TABLES;
         }

-        required_access.emplace_back(flags, table_name.name.database, table_name.name.table);
+        required_access.emplace_back(flags, table_name.database, table_name.table);
     }

-    if (access_restore_task)
-        insertAtEnd(required_access, access_restore_task->getRequiredAccess());
+    if (access_restorer)
+        insertAtEnd(required_access, access_restorer->getRequiredAccess());

     /// We convert to AccessRights and back to check access rights in a predictable way
     /// (some elements could be duplicated or not sorted).
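The privilege computation above can be summarized as: predefined tables are skipped (except `system.functions`, which needs `CREATE_FUNCTION` when it has data), creating a table implies `CREATE_TABLE` or `CREATE_TEMPORARY_TABLE`, restoring data implies `INSERT`, and `SHOW_TABLES` is the floor. A simplified model of that decision (a toy flag set, not the real `AccessFlags`/`AccessType` machinery):

```cpp
#include <cstdint>

enum AccessFlag : uint32_t
{
    CREATE_TABLE           = 1 << 0,
    CREATE_TEMPORARY_TABLE = 1 << 1,
    INSERT                 = 1 << 2,
    SHOW_TABLES            = 1 << 3,
};

struct TableToRestore
{
    bool is_predefined = false;  // e.g. system tables: never created by the restore
    bool is_temporary = false;
    bool has_data = false;       // the backup actually contains rows for this table
};

uint32_t requiredAccess(const TableToRestore & t, bool structure_only, bool may_create)
{
    if (t.is_predefined)
        return 0;                // handled separately in the real code
    uint32_t flags = 0;
    if (may_create)
        flags |= t.is_temporary ? CREATE_TEMPORARY_TABLE : CREATE_TABLE;
    if (!structure_only && t.has_data)
        flags |= INSERT;
    if (!flags)
        flags = SHOW_TABLES;     // at minimum we must be allowed to see the table
    return flags;
}
```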
@@ -597,7 +571,7 @@ void RestorerFromBackup::createDatabases()
     for (const auto & [database_name, database_info] : database_infos)
     {
         bool need_create_database = (restore_settings.create_database != RestoreDatabaseCreationMode::kMustExist);
-        if (need_create_database && DatabaseCatalog::isPredefinedDatabaseName(database_name))
+        if (database_info.is_predefined_database)
             need_create_database = false; /// Predefined databases always exist.

         if (need_create_database)
@@ -610,15 +584,18 @@ void RestorerFromBackup::createDatabases()
                 create_database_query->as<ASTCreateQuery &>().if_not_exists = true;
             }
             LOG_TRACE(log, "Creating database {}: {}", backQuoteIfNeed(database_name), serializeAST(*create_database_query));
-            executeCreateQuery(create_database_query);
+            InterpreterCreateQuery interpreter{create_database_query, context};
+            interpreter.setInternal(true);
+            interpreter.execute();
         }

         DatabasePtr database = DatabaseCatalog::instance().getDatabase(database_name);

-        if (!restore_settings.allow_different_database_def)
+        if (!restore_settings.allow_different_database_def && !database_info.is_predefined_database)
         {
             /// Check that the database's definition is the same as expected.
-            ASTPtr create_database_query = database->getCreateDatabaseQueryForBackup();
+            ASTPtr create_database_query = database->getCreateDatabaseQuery();
+            adjustCreateQueryForBackup(create_database_query, context->getGlobalContext(), nullptr);
             ASTPtr expected_create_query = database_info.create_database_query;
             if (serializeAST(*create_database_query) != serializeAST(*expected_create_query))
             {
@@ -643,62 +620,62 @@ void RestorerFromBackup::createTables()
         if (tables_to_create.empty())
             break; /// We've already created all the tables.

-        for (const auto & table_key : tables_to_create)
+        for (const auto & table_name : tables_to_create)
         {
-            auto & table_info = table_infos.at(table_key);
+            auto & table_info = table_infos.at(table_name);

-            DatabasePtr database;
-            if (table_key.is_temporary)
-                database = DatabaseCatalog::instance().getDatabaseForTemporaryTables();
-            else
-                database = DatabaseCatalog::instance().getDatabase(table_key.name.database);
+            DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_name.database);

             bool need_create_table = (restore_settings.create_table != RestoreTableCreationMode::kMustExist);
-            if (need_create_table && hasSystemTableEngine(*table_info.create_table_query))
-                need_create_table = false; /// Tables with System* table engine already exist or can't be created by SQL anyway.
+            if (table_info.is_predefined_table)
+                need_create_table = false; /// Predefined tables always exist.

             if (need_create_table)
             {
-                /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
-                /// database-specific things).
                 auto create_table_query = table_info.create_table_query;
                 if (restore_settings.create_table == RestoreTableCreationMode::kCreateIfNotExists)
                 {
                     create_table_query = create_table_query->clone();
                     create_table_query->as<ASTCreateQuery &>().if_not_exists = true;
                 }

                 LOG_TRACE(
                     log,
-                    "Creating {}table {}: {}",
-                    (table_key.is_temporary ? "temporary " : ""),
-                    table_key.name.getFullName(),
+                    "Creating {}: {}",
+                    tableNameWithTypeToString(table_name.database, table_name.table, false),
                     serializeAST(*create_table_query));

-                database->createTableRestoredFromBackup(create_table_query, *this);
+                /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
+                /// database-specific things).
+                database->createTableRestoredFromBackup(
+                    create_table_query,
+                    context,
+                    restore_coordination,
+                    std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
             }

             table_info.created = true;

-            auto resolved_id = table_key.is_temporary
-                ? context->resolveStorageID(StorageID{"", table_key.name.table}, Context::ResolveExternal)
-                : context->resolveStorageID(StorageID{table_key.name.database, table_key.name.table}, Context::ResolveGlobal);
+            auto resolved_id = (table_name.database == DatabaseCatalog::TEMPORARY_DATABASE)
+                ? context->resolveStorageID(StorageID{"", table_name.table}, Context::ResolveExternal)
+                : context->resolveStorageID(StorageID{table_name.database, table_name.table}, Context::ResolveGlobal);

             auto storage = database->getTable(resolved_id.table_name, context);
             table_info.storage = storage;
             table_info.table_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout);

-            if (!restore_settings.allow_different_table_def)
+            if (!restore_settings.allow_different_table_def && !table_info.is_predefined_table)
             {
-                ASTPtr create_table_query = storage->getCreateQueryForBackup(context, nullptr);
+                ASTPtr create_table_query = database->getCreateTableQuery(resolved_id.table_name, context);
+                adjustCreateQueryForBackup(create_table_query, context->getGlobalContext(), nullptr);
                 ASTPtr expected_create_query = table_info.create_table_query;
                 if (serializeAST(*create_table_query) != serializeAST(*expected_create_query))
                 {
                     throw Exception(
                         ErrorCodes::CANNOT_RESTORE_TABLE,
-                        "The {}table {} has a different definition: {} "
+                        "{} has a different definition: {} "
                         "comparing to its definition in the backup: {}",
-                        (table_key.is_temporary ? "temporary " : ""),
-                        table_key.name.getFullName(),
+                        tableNameWithTypeToString(table_name.database, table_name.table, true),
                         serializeAST(*create_table_query),
                         serializeAST(*expected_create_query));
                 }
@@ -708,6 +685,15 @@ void RestorerFromBackup::createTables()
             {
                 const auto & data_path_in_backup = table_info.data_path_in_backup;
                 const auto & partitions = table_info.partitions;
+                if (partitions && !storage->supportsBackupPartition())
+                {
+                    throw Exception(
+                        ErrorCodes::CANNOT_RESTORE_TABLE,
+                        "Table engine {} doesn't support partitions, cannot restore {}",
+                        storage->getName(),
+                        tableNameWithTypeToString(table_name.database, table_name.table, false));
+                }
+
                 storage->restoreDataFromBackup(*this, data_path_in_backup, partitions);
             }
         }
@@ -715,9 +701,9 @@ void RestorerFromBackup::createTables()
 }

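The outer loop of `createTables()` keeps asking `findTablesWithoutDependencies()` for tables whose dependencies are already satisfied until nothing is left; a pass that makes no progress while tables remain signals a cyclic dependency. A sketch of that fixed-point iteration with simplified types (not the ClickHouse classes):

```cpp
#include <map>
#include <set>
#include <string>
#include <vector>

struct Info
{
    std::set<std::string> dependencies;
    bool created = false;
};

// Repeatedly create every table whose dependencies are created; stop when a
// full pass produces nothing (all done, or a cycle which the real code then
// reports and forces through anyway).
void createAll(std::map<std::string, Info> & tables)
{
    for (;;)
    {
        std::vector<std::string> ready;
        for (const auto & [name, info] : tables)
        {
            if (info.created)
                continue;
            bool deps_met = true;
            for (const auto & dep : info.dependencies)
            {
                auto it = tables.find(dep);
                if (it != tables.end() && !it->second.created)
                    deps_met = false;
            }
            if (deps_met)
                ready.push_back(name);
        }
        if (ready.empty())
            break;
        for (const auto & name : ready)
            tables[name].created = true;  // stands in for executing CREATE TABLE
    }
}
```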
 /// Returns the list of tables without dependencies or those which dependencies have been created before.
-std::vector<RestorerFromBackup::TableKey> RestorerFromBackup::findTablesWithoutDependencies() const
+std::vector<QualifiedTableName> RestorerFromBackup::findTablesWithoutDependencies() const
 {
-    std::vector<TableKey> tables_without_dependencies;
+    std::vector<QualifiedTableName> tables_without_dependencies;
     bool all_tables_created = true;

     for (const auto & [key, table_info] : table_infos)
@@ -732,7 +718,7 @@ std::vector<RestorerFromBackup::TableKey> RestorerFromBackup::findTablesWithoutD
         bool all_dependencies_met = true;
         for (const auto & dependency : table_info.dependencies)
         {
-            auto it = table_infos.find(TableKey{dependency, false});
+            auto it = table_infos.find(dependency);
             if ((it != table_infos.end()) && !it->second.created)
             {
                 all_dependencies_met = false;
@@ -751,7 +737,7 @@ std::vector<RestorerFromBackup::TableKey> RestorerFromBackup::findTablesWithoutD
         return {};

     /// Cyclic dependency? We'll try to create those tables anyway but probably it's going to fail.
-    std::vector<TableKey> tables_with_cyclic_dependencies;
+    std::vector<QualifiedTableName> tables_with_cyclic_dependencies;
     for (const auto & [key, table_info] : table_infos)
     {
         if (!table_info.created)
@@ -764,7 +750,7 @@ std::vector<RestorerFromBackup::TableKey> RestorerFromBackup::findTablesWithoutD
         "Some tables have cyclic dependency from each other: {}",
         boost::algorithm::join(
             tables_with_cyclic_dependencies
-                | boost::adaptors::transformed([](const TableKey & key) -> String { return key.name.getFullName(); }),
+                | boost::adaptors::transformed([](const QualifiedTableName & table_name) -> String { return table_name.getFullName(); }),
             ", "));

     return tables_with_cyclic_dependencies;
@@ -772,38 +758,52 @@ std::vector<RestorerFromBackup::TableKey> RestorerFromBackup::findTablesWithoutD

 void RestorerFromBackup::addDataRestoreTask(DataRestoreTask && new_task)
 {
-    if (current_stage == Stage::kInsertingDataToTables)
+    if (current_status == kInsertingDataToTablesStatus)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding data-restoring tasks is not allowed");
     data_restore_tasks.push_back(std::move(new_task));
 }

 void RestorerFromBackup::addDataRestoreTasks(DataRestoreTasks && new_tasks)
 {
-    if (current_stage == Stage::kInsertingDataToTables)
+    if (current_status == kInsertingDataToTablesStatus)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Adding data-restoring tasks is not allowed");
     insertAtEnd(data_restore_tasks, std::move(new_tasks));
 }

-void RestorerFromBackup::checkPathInBackupToRestoreAccess(const String & path)
+RestorerFromBackup::DataRestoreTasks RestorerFromBackup::getDataRestoreTasks()
 {
-    if (!access_restore_task || !access_restore_task->hasDataPath(path))
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Path to restore access was not added");
+    if (data_restore_tasks.empty())
+        return {};
+
+    LOG_TRACE(log, "Will insert data to tables");
+
+    /// Storages and table locks must exist while we're executing data restoring tasks.
+    auto storages = std::make_shared<std::vector<StoragePtr>>();
+    auto table_locks = std::make_shared<std::vector<TableLockHolder>>();
+    storages->reserve(table_infos.size());
+    table_locks->reserve(table_infos.size());
+    for (const auto & table_info : table_infos | boost::adaptors::map_values)
+    {
+        storages->push_back(table_info.storage);
+        table_locks->push_back(table_info.table_lock);
+    }
+
+    DataRestoreTasks res_tasks;
+    for (const auto & task : data_restore_tasks)
+        res_tasks.push_back([task, storages, table_locks] { task(); });
+
+    return res_tasks;
 }

-void RestorerFromBackup::executeCreateQuery(const ASTPtr & create_query) const
+std::vector<std::pair<UUID, AccessEntityPtr>> RestorerFromBackup::getAccessEntitiesToRestore()
 {
-    InterpreterCreateQuery interpreter{create_query, context};
-    interpreter.setInternal(true);
-    interpreter.execute();
-}
-
-void RestorerFromBackup::throwPartitionsNotSupported(const StorageID & storage_id, const String & table_engine)
-{
-    throw Exception(
-        ErrorCodes::CANNOT_RESTORE_TABLE,
-        "Table engine {} doesn't support partitions, cannot table {}",
-        table_engine,
-        storage_id.getFullTableName());
+    if (!access_restorer || access_restored)
+        return {};
+
+    /// getAccessEntitiesToRestore() will return entities only when called first time (we don't want to restore the same entities again).
+    access_restored = true;
+
+    return access_restorer->getAccessEntities(context->getAccessControl());
 }

 void RestorerFromBackup::throwTableIsNotEmpty(const StorageID & storage_id)
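In the new `getDataRestoreTasks()` above, each task closure captures shared vectors of storages and table locks, so the locks stay held until the last data-restoring task has run, wherever it runs. The same trick in isolation (simplified stand-in types):

```cpp
#include <functional>
#include <memory>
#include <utility>
#include <vector>

struct Storage {};
struct TableLock {};

// Every wrapped task co-owns the storages and locks; they are released only
// when the last copy of the capture is destroyed, i.e. after the final task.
std::vector<std::function<void()>> wrapTasks(
    std::vector<std::function<void()>> tasks,
    std::shared_ptr<std::vector<std::shared_ptr<Storage>>> storages,
    std::shared_ptr<std::vector<TableLock>> table_locks)
{
    std::vector<std::function<void()>> result;
    result.reserve(tasks.size());
    for (auto & task : tasks)
        result.push_back([task = std::move(task), storages, table_locks] { task(); });
    return result;
}
```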
@@ -15,7 +15,9 @@ class IBackup;
 using BackupPtr = std::shared_ptr<const IBackup>;
 class IRestoreCoordination;
 struct StorageID;
-class AccessRestoreTask;
+class AccessRestorerFromBackup;
+struct IAccessEntity;
+using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;

 /// Restores the definition of databases and tables and prepares tasks to restore the data of the tables.
 class RestorerFromBackup : private boost::noncopyable
@@ -26,63 +28,38 @@ public:
         const RestoreSettings & restore_settings_,
         std::shared_ptr<IRestoreCoordination> restore_coordination_,
         const BackupPtr & backup_,
-        const ContextMutablePtr & context_,
-        std::chrono::seconds timeout_);
+        const ContextMutablePtr & context_);

     ~RestorerFromBackup();

-    /// Restores the definition of databases and tables and prepares tasks to restore the data of the tables.
-    /// restoreMetadata() checks access rights internally so checkAccessRightsOnly() shouldn't be called first.
-    void restoreMetadata();
+    enum Mode
+    {
+        /// Restores databases and tables.
+        RESTORE,

-    /// Only checks access rights without restoring anything.
-    void checkAccessOnly();
+        /// Only checks access rights without restoring anything.
+        CHECK_ACCESS_ONLY
+    };

     using DataRestoreTask = std::function<void()>;
     using DataRestoreTasks = std::vector<DataRestoreTask>;
-    DataRestoreTasks getDataRestoreTasks();
+
+    /// Restores the metadata of databases and tables and returns tasks to restore the data of tables.
+    DataRestoreTasks run(Mode mode);

     BackupPtr getBackup() const { return backup; }
     const RestoreSettings & getRestoreSettings() const { return restore_settings; }
     bool isNonEmptyTableAllowed() const { return getRestoreSettings().allow_non_empty_tables; }
     std::shared_ptr<IRestoreCoordination> getRestoreCoordination() const { return restore_coordination; }
-    std::chrono::seconds getTimeout() const { return timeout; }
     ContextMutablePtr getContext() const { return context; }
-    void executeCreateQuery(const ASTPtr & create_query) const;

     /// Adds a data restore task which will be later returned by getDataRestoreTasks().
     /// This function can be called by implementations of IStorage::restoreFromBackup() in inherited storage classes.
     void addDataRestoreTask(DataRestoreTask && new_task);
     void addDataRestoreTasks(DataRestoreTasks && new_tasks);

-    /// Adds a new data path to restore access control.
-    void checkPathInBackupToRestoreAccess(const String & path);
+    /// Returns the list of access entities to restore.
+    std::vector<std::pair<UUID, AccessEntityPtr>> getAccessEntitiesToRestore();

-    /// Reading a backup includes a few stages:
-    enum class Stage
-    {
-        /// Initial stage.
-        kPreparing,
-
-        /// Finding databases and tables in the backup which we're going to restore.
-        kFindingTablesInBackup,
-
-        /// Creating databases or finding them and checking their definitions.
-        kCreatingDatabases,
-
-        /// Creating tables or finding them and checking their definition.
-        kCreatingTables,
-
-        /// Inserting restored data to tables.
-        kInsertingDataToTables,
-
-        /// An error happens during any of the stages above, the backup is not restored properly.
-        kError = -1,
-    };
-    static std::string_view toString(Stage stage);
-
-    /// Throws an exception that a specified table engine doesn't support partitions.
-    [[noreturn]] static void throwPartitionsNotSupported(const StorageID & storage_id, const String & table_engine);
-
     /// Throws an exception that a specified table is already non-empty.
     [[noreturn]] static void throwTableIsNotEmpty(const StorageID & storage_id);
@@ -93,54 +70,56 @@ private:
     std::shared_ptr<IRestoreCoordination> restore_coordination;
     BackupPtr backup;
     ContextMutablePtr context;
-    std::chrono::seconds timeout;
+    std::chrono::milliseconds create_table_timeout;
     Poco::Logger * log;

-    Stage current_stage = Stage::kPreparing;
-    std::vector<std::filesystem::path> root_paths_in_backup;
+    Strings all_hosts;
     DDLRenamingMap renaming_map;
+    std::vector<std::filesystem::path> root_paths_in_backup;

-    void run(bool only_check_access);
-    void setStage(Stage new_stage, const String & error_message = {});
     void findRootPathsInBackup();
-    void collectDatabaseAndTableInfos();
-    void collectTableInfo(const QualifiedTableName & table_name_in_backup, bool is_temporary_table, const std::optional<ASTs> & partitions);
-    void collectDatabaseInfo(const String & database_name_in_backup, const std::set<DatabaseAndTableName> & except_table_names, bool throw_if_no_database_metadata_in_backup);
-    void collectAllDatabasesInfo(const std::set<String> & except_database_names, const std::set<DatabaseAndTableName> & except_table_names);
-    void checkAccessForCollectedInfos() const;
+    void findDatabasesAndTablesInBackup();
+    void findTableInBackup(const QualifiedTableName & table_name_in_backup, const std::optional<ASTs> & partitions);
+    void findDatabaseInBackup(const String & database_name_in_backup, const std::set<DatabaseAndTableName> & except_table_names);
+    void findEverythingInBackup(const std::set<String> & except_database_names, const std::set<DatabaseAndTableName> & except_table_names);
+
+    void checkAccessForObjectsFoundInBackup() const;
+
     void createDatabases();
     void createTables();

+    DataRestoreTasks getDataRestoreTasks();
+
+    void setStatus(const String & new_status, const String & message = "");
+
     struct DatabaseInfo
     {
         ASTPtr create_database_query;
+        bool is_predefined_database = false;
     };

     struct TableInfo
     {
         ASTPtr create_table_query;
-        std::optional<ASTs> partitions;
-        std::filesystem::path data_path_in_backup;
+        bool is_predefined_table = false;
         std::unordered_set<QualifiedTableName> dependencies;
+        bool has_data = false;
+        std::filesystem::path data_path_in_backup;
+        std::optional<ASTs> partitions;
         bool created = false;
         StoragePtr storage;
         TableLockHolder table_lock;
     };

-    struct TableKey
-    {
-        QualifiedTableName name;
-        bool is_temporary = false;
-        bool operator ==(const TableKey & right) const;
-        bool operator <(const TableKey & right) const;
-    };
-
-    std::vector<TableKey> findTablesWithoutDependencies() const;
+    std::vector<QualifiedTableName> findTablesWithoutDependencies() const;

+    String current_status;
     std::unordered_map<String, DatabaseInfo> database_infos;
-    std::map<TableKey, TableInfo> table_infos;
+    std::map<QualifiedTableName, TableInfo> table_infos;
     std::vector<DataRestoreTask> data_restore_tasks;
-    std::shared_ptr<AccessRestoreTask> access_restore_task;
+    std::unique_ptr<AccessRestorerFromBackup> access_restorer;
+    bool access_restored = false;
 };

 }
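The header also declares `getAccessEntitiesToRestore()`, which the implementation makes deliberately one-shot via the `access_restored` flag. A standalone sketch of that contract (stand-in types, not the real `AccessRestorerFromBackup`):

```cpp
#include <utility>
#include <vector>

struct Entity {};

// Entities are handed out exactly once, so a second call cannot cause the
// same users/roles to be restored twice.
class AccessRestorer
{
public:
    explicit AccessRestorer(std::vector<Entity> entities_) : entities(std::move(entities_)) {}

    std::vector<Entity> getAccessEntitiesToRestore()
    {
        if (taken)
            return {};       // second and later calls return nothing
        taken = true;
        return std::move(entities);
    }

private:
    std::vector<Entity> entities;
    bool taken = false;
};
```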
@@ -138,7 +138,7 @@ public:
         type_indexes.insert(TypeToTypeIndex<NearestFieldType<T>>);
     }

-    DataTypePtr getScalarType() const { return getLeastSupertype(type_indexes, true); }
+    DataTypePtr getScalarType() const { return getLeastSupertypeOrString(type_indexes); }
     bool haveNulls() const { return have_nulls; }
     bool needConvertField() const { return field_types.size() > 1; }

@@ -167,6 +167,7 @@ FieldInfo getFieldInfo(const Field & field)
 ColumnObject::Subcolumn::Subcolumn(MutableColumnPtr && data_, bool is_nullable_)
     : least_common_type(getDataTypeByColumn(*data_))
     , is_nullable(is_nullable_)
+    , num_rows(data_->size())
 {
     data.push_back(std::move(data_));
 }
@@ -176,15 +177,13 @@ ColumnObject::Subcolumn::Subcolumn(
     : least_common_type(std::make_shared<DataTypeNothing>())
     , is_nullable(is_nullable_)
     , num_of_defaults_in_prefix(size_)
+    , num_rows(size_)
 {
 }

 size_t ColumnObject::Subcolumn::size() const
 {
-    size_t res = num_of_defaults_in_prefix;
-    for (const auto & part : data)
-        res += part->size();
-    return res;
+    return num_rows;
 }

 size_t ColumnObject::Subcolumn::byteSize() const
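The new `num_rows` member turns `Subcolumn::size()` from a walk over all column parts into a constant-time read; every mutator in the hunks below keeps the counter in sync. A toy model of that invariant:

```cpp
#include <cassert>
#include <cstddef>

// Simplified model: size() used to be computed as the defaults prefix plus
// the sum of all part sizes; now it is a cached counter updated on mutation.
class Subcolumn
{
public:
    void insert() { ++parts_size; ++num_rows; }
    void insertManyDefaults(size_t n) { num_of_defaults_in_prefix += n; num_rows += n; }
    void popBack(size_t n)
    {
        assert(n <= num_rows);
        num_rows -= n;  // the real code also shrinks the stored parts
    }
    size_t size() const { return num_rows; }  // O(1) instead of O(parts)

private:
    size_t parts_size = 0;
    size_t num_of_defaults_in_prefix = 0;
    size_t num_rows = 0;
};

int main()
{
    Subcolumn s;
    s.insertManyDefaults(3);
    s.insert();
    s.popBack(2);
    assert(s.size() == 2);
}
```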
@@ -321,7 +320,7 @@ void ColumnObject::Subcolumn::insert(Field field, FieldInfo info)
     {
         if (isConversionRequiredBetweenIntegers(*base_type, *least_common_base_type))
         {
-            base_type = getLeastSupertype(DataTypes{std::move(base_type), least_common_base_type}, true);
+            base_type = getLeastSupertypeOrString(DataTypes{std::move(base_type), least_common_base_type});
             type_changed = true;
             if (!least_common_base_type->equals(*base_type))
                 addNewColumnPart(createArrayOfType(std::move(base_type), value_dim));
@@ -332,12 +331,14 @@ void ColumnObject::Subcolumn::insert(Field field, FieldInfo info)
         field = convertFieldToTypeOrThrow(field, *least_common_type.get());

     data.back()->insert(field);
+    ++num_rows;
 }

 void ColumnObject::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t start, size_t length)
 {
     assert(start + length <= src.size());
     size_t end = start + length;
+    num_rows += length;

     if (data.empty())
     {
@@ -345,7 +346,7 @@ void ColumnObject::Subcolumn::insertRangeFrom(const Subcolumn & src, size_t star
     }
     else if (!least_common_type.get()->equals(*src.getLeastCommonType()))
     {
-        auto new_least_common_type = getLeastSupertype(DataTypes{least_common_type.get(), src.getLeastCommonType()}, true);
+        auto new_least_common_type = getLeastSupertypeOrString(DataTypes{least_common_type.get(), src.getLeastCommonType()});
         if (!new_least_common_type->equals(*least_common_type.get()))
             addNewColumnPart(std::move(new_least_common_type));
     }
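Judging by the rename, `getLeastSupertype(types, true)` becomes `getLeastSupertypeOrString(types)`: when the given types have no common supertype, fall back to `String` rather than failing. A toy illustration of that assumed fallback semantics (the simplified type model below is not ClickHouse's):

```cpp
#include <optional>
#include <string>
#include <vector>

using TypeName = std::string;

// Returns a common supertype if one exists under a toy widening rule.
std::optional<TypeName> tryLeastSupertype(const std::vector<TypeName> & types)
{
    if (types.empty())
        return std::nullopt;
    TypeName common = types.front();
    for (const auto & t : types)
    {
        if (t == common)
            continue;
        if ((t == "UInt64" && common == "Int64") || (t == "Int64" && common == "UInt64"))
            common = "Int128";    // toy widening rule
        else
            return std::nullopt;  // incompatible, e.g. Int64 vs Array(String)
    }
    return common;
}

// Assumed contract of the renamed helper: never fail, degrade to String.
TypeName leastSupertypeOrString(const std::vector<TypeName> & types)
{
    return tryLeastSupertype(types).value_or("String");
}
```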
@@ -487,6 +488,8 @@ void ColumnObject::Subcolumn::insertDefault()
         ++num_of_defaults_in_prefix;
     else
         data.back()->insertDefault();
+
+    ++num_rows;
 }

 void ColumnObject::Subcolumn::insertManyDefaults(size_t length)
@@ -495,12 +498,15 @@ void ColumnObject::Subcolumn::insertManyDefaults(size_t length)
         num_of_defaults_in_prefix += length;
     else
         data.back()->insertManyDefaults(length);
+
+    num_rows += length;
 }

 void ColumnObject::Subcolumn::popBack(size_t n)
 {
     assert(n <= size());
+
+    num_rows -= n;
     size_t num_removed = 0;
     for (auto it = data.rbegin(); it != data.rend(); ++it)
     {
@@ -559,15 +565,11 @@ ColumnObject::Subcolumn ColumnObject::Subcolumn::recreateWithDefaultValues(const
     if (is_nullable)
         scalar_type = makeNullable(scalar_type);

-    Subcolumn new_subcolumn;
+    Subcolumn new_subcolumn(*this);
     new_subcolumn.least_common_type = LeastCommonType{createArrayOfType(scalar_type, field_info.num_dimensions)};
-    new_subcolumn.is_nullable = is_nullable;
-    new_subcolumn.num_of_defaults_in_prefix = num_of_defaults_in_prefix;
-    new_subcolumn.data.reserve(data.size());

-    for (const auto & part : data)
-        new_subcolumn.data.push_back(recreateColumnWithDefaultValues(
-            part, scalar_type, field_info.num_dimensions));
+    for (auto & part : new_subcolumn.data)
+        part = recreateColumnWithDefaultValues(part, scalar_type, field_info.num_dimensions);

     return new_subcolumn;
 }
@ -146,6 +146,8 @@ public:
|
|||||||
/// least common type and we count number of defaults in prefix,
|
/// least common type and we count number of defaults in prefix,
|
||||||
/// which will be converted to the default type of final common type.
|
/// which will be converted to the default type of final common type.
|
||||||
size_t num_of_defaults_in_prefix = 0;
|
size_t num_of_defaults_in_prefix = 0;
|
||||||
|
|
||||||
|
size_t num_rows = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
using Subcolumns = SubcolumnsTree<Subcolumn>;
|
using Subcolumns = SubcolumnsTree<Subcolumn>;
|
||||||
|
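The hunks above thread a new `num_rows` counter through every mutating method of `ColumnObject::Subcolumn`, so the row count no longer has to be recomputed from the parts. A minimal standalone sketch of the same bookkeeping pattern, using hypothetical simplified types rather than the actual ClickHouse classes:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

/// Simplified model of the bookkeeping added above: every mutation updates
/// num_rows, so size() is O(1) instead of summing part sizes on each call.
struct SubcolumnModel
{
    std::vector<std::vector<int>> data;    /// column "parts"
    size_t num_of_defaults_in_prefix = 0;
    size_t num_rows = 0;

    void insert(int value)
    {
        if (data.empty())
            data.emplace_back();
        data.back().push_back(value);
        ++num_rows;                        /// mirrors ++num_rows in insert()
    }

    void insertManyDefaults(size_t length)
    {
        if (data.empty())
            num_of_defaults_in_prefix += length;
        else
            data.back().insert(data.back().end(), length, 0);
        num_rows += length;                /// mirrors num_rows += length
    }

    void popBack(size_t n)
    {
        assert(n <= num_rows);
        num_rows -= n;                     /// mirrors num_rows -= n
        while (n != 0 && !data.empty())
        {
            size_t take = std::min(n, data.back().size());
            data.back().resize(data.back().size() - take);
            if (data.back().empty())
                data.pop_back();
            n -= take;
        }
        num_of_defaults_in_prefix -= n;    /// the remainder comes out of the defaults prefix
    }

    size_t size() const { return num_rows; }
};

int main()
{
    SubcolumnModel s;
    s.insertManyDefaults(3);
    s.insert(42);
    s.popBack(2);
    assert(s.size() == 2 && s.num_of_defaults_in_prefix == 2);
}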
@@ -489,10 +489,10 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
     const T * data_pos = data.data();

     /** A slightly more optimized version.
       * Based on the assumption that often pieces of consecutive values
       * completely pass or do not pass the filter.
       * Therefore, we will optimistically check the parts of `SIMD_BYTES` values.
       */
     static constexpr size_t SIMD_BYTES = 64;
     const UInt8 * filt_end_aligned = filt_pos + size / SIMD_BYTES * SIMD_BYTES;

@@ -577,6 +577,115 @@ ColumnPtr ColumnVector<T>::index(const IColumn & indexes, size_t limit) const
     return selectIndexImpl(*this, indexes, limit);
 }

+#ifdef __SSE2__
+
+namespace
+{
+    /** Optimization for ColumnVector replicate using SIMD instructions.
+      * For such optimization it is important that data is right padded with 15 bytes.
+      *
+      * Replicate span size is offsets[i] - offsets[i - 1].
+      *
+      * Split spans into 3 categories.
+      * 1. Span with size 0. Continue iteration.
+      *
+      * 2. Span with size 1. Remember the pointer from which data must be copied into the result.
+      * Then, as soon as a span with a different size is seen, copy the accumulated data directly into the result and reset the pointer.
+      * Example:
+      * Data: 1 2 3 4
+      * Offsets: 1 2 3 4
+      * Result data: 1 2 3 4
+      *
+      * 3. Span with size greater than 1. Broadcast the single data element into a register and copy it into the result data.
+      * Example:
+      * Data: 1 2 3 4
+      * Offsets: 4 4 4 4
+      * Result data: 1 1 1 1
+      *
+      * Additional tail handling is needed if the copy pointer accumulated from size-1 spans is still non-null at the end.
+      */
+    template<typename IntType>
+    requires (std::is_same_v<IntType, Int32> || std::is_same_v<IntType, UInt32>)
+    void replicateSSE42Int32(const IntType * __restrict data, IntType * __restrict result_data, const IColumn::Offsets & offsets)
+    {
+        const IntType * data_copy_begin_ptr = nullptr;
+        size_t offsets_size = offsets.size();
+
+        for (size_t offset_index = 0; offset_index < offsets_size; ++offset_index)
+        {
+            size_t span = offsets[offset_index] - offsets[offset_index - 1];
+            if (span == 1)
+            {
+                if (!data_copy_begin_ptr)
+                    data_copy_begin_ptr = data + offset_index;
+
+                continue;
+            }
+
+            /// Copy data
+
+            if (data_copy_begin_ptr)
+            {
+                size_t copy_size = (data + offset_index) - data_copy_begin_ptr;
+                bool remainder = copy_size % 4;
+                size_t sse_copy_counter = (copy_size / 4) + remainder;
+                auto * result_data_copy = result_data;
+
+                while (sse_copy_counter)
+                {
+                    __m128i copy_batch = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data_copy_begin_ptr));
+                    _mm_storeu_si128(reinterpret_cast<__m128i *>(result_data_copy), copy_batch);
+                    result_data_copy += 4;
+                    data_copy_begin_ptr += 4;
+                    --sse_copy_counter;
+                }
+
+                result_data += copy_size;
+                data_copy_begin_ptr = nullptr;
+            }
+
+            if (span == 0)
+                continue;
+
+            /// Copy single data element into result data
+
+            bool span_remainder = span % 4;
+            size_t copy_counter = (span / 4) + span_remainder;
+            auto * result_data_tmp = result_data;
+            __m128i copy_element_data = _mm_set1_epi32(data[offset_index]);
+
+            while (copy_counter)
+            {
+                _mm_storeu_si128(reinterpret_cast<__m128i *>(result_data_tmp), copy_element_data);
+                result_data_tmp += 4;
+                --copy_counter;
+            }
+
+            result_data += span;
+        }
+
+        /// Copy tail if needed
+
+        if (data_copy_begin_ptr)
+        {
+            size_t copy_size = (data + offsets_size) - data_copy_begin_ptr;
+            bool remainder = copy_size % 4;
+            size_t sse_copy_counter = (copy_size / 4) + remainder;
+
+            while (sse_copy_counter)
+            {
+                __m128i copy_batch = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data_copy_begin_ptr));
+                _mm_storeu_si128(reinterpret_cast<__m128i *>(result_data), copy_batch);
+                result_data += 4;
+                data_copy_begin_ptr += 4;
+                --sse_copy_counter;
+            }
+        }
+    }
+}
+
+#endif
+
 template <typename T>
 ColumnPtr ColumnVector<T>::replicate(const IColumn::Offsets & offsets) const
 {
@@ -589,6 +698,14 @@ ColumnPtr ColumnVector<T>::replicate(const IColumn::Offsets & offsets) const

     auto res = this->create(offsets.back());

+#ifdef __SSE2__
+    if constexpr (std::is_same_v<T, UInt32>)
+    {
+        replicateSSE42Int32(getData().data(), res->getData().data(), offsets);
+        return res;
+    }
+#endif
+
     auto it = res->getData().begin(); // NOLINT
     for (size_t i = 0; i < size; ++i)
     {
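For reference, the contract `replicateSSE42Int32` implements is the usual `IColumn::replicate` one. A scalar sketch in plain C++ (illustrative only, not the ClickHouse implementation) makes the span arithmetic explicit:

#include <cstddef>
#include <cstdint>
#include <vector>

/// Scalar reference for the replicate() contract the SSE path above optimizes:
/// element i is repeated (offsets[i] - offsets[i - 1]) times, and
/// offsets.back() is the total size of the result.
std::vector<uint32_t> replicateScalar(
    const std::vector<uint32_t> & data, const std::vector<uint64_t> & offsets)
{
    std::vector<uint32_t> result;
    result.reserve(offsets.empty() ? 0 : offsets.back());

    uint64_t prev_offset = 0;
    for (size_t i = 0; i < data.size(); ++i)
    {
        uint64_t span = offsets[i] - prev_offset;   /// 0 = drop, 1 = keep, >1 = repeat
        prev_offset = offsets[i];
        for (uint64_t j = 0; j < span; ++j)
            result.push_back(data[i]);
    }
    return result;
}

/// replicateScalar({1, 2, 3, 4}, {1, 2, 3, 4}) == {1, 2, 3, 4}
/// replicateScalar({1, 2, 3, 4}, {4, 4, 4, 4}) == {1, 1, 1, 1}

The SSE variant batches runs of size-1 spans into 16-byte copies and broadcasts larger spans with `_mm_set1_epi32`; it relies on the right padding of ClickHouse's padded arrays, which also makes the `offsets[offset_index - 1]` read valid (and zero) on the first iteration.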
@@ -89,7 +89,7 @@ TEST(ColumnObject, InsertRangeFrom)

     const auto & type_dst = subcolumn_dst.getLeastCommonType();
     const auto & type_src = subcolumn_src.getLeastCommonType();
-    auto type_res = getLeastSupertype(DataTypes{type_dst, type_src}, true);
+    auto type_res = getLeastSupertypeOrString(DataTypes{type_dst, type_src});

     size_t from = rng() % subcolumn_src.size();
     size_t to = rng() % subcolumn_src.size();
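Throughout this commit, `getLeastSupertype(types, /* allow_conversion_to_string = */ true)` calls become `getLeastSupertypeOrString(types)`, and the explicit instantiations later in the diff show a three-way `LeastSupertypeOnError` policy replacing the old boolean. A hedged sketch of that dispatch style in standard C++ (the real signatures live in DataTypes/getLeastSupertype.h and are not shown in this excerpt):

#include <memory>
#include <stdexcept>
#include <string>

/// Illustration of replacing a boolean "allow conversion to string" flag with
/// a compile-time error policy; the enumerator names mirror the
/// instantiations that appear in this diff.
enum class LeastSupertypeOnError { Throw, String, Null };

using TypePtr = std::shared_ptr<const std::string>;   /// stand-in for DataTypePtr

template <LeastSupertypeOnError on_error>
TypePtr onNoCommonSupertype()
{
    if constexpr (on_error == LeastSupertypeOnError::String)
        return std::make_shared<const std::string>("String");  /// fall back to String
    else if constexpr (on_error == LeastSupertypeOnError::Null)
        return nullptr;                                        /// let the caller decide
    else
        throw std::invalid_argument("There is no supertype");  /// old default behaviour
}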
@@ -631,7 +631,7 @@
     M(660, HDFS_ERROR) \
     M(661, CANNOT_SEND_SIGNAL) \
     M(662, FS_METADATA_ERROR) \
-    M(663, CANNOT_COLLECT_OBJECTS_FOR_BACKUP) \
+    M(663, INCONSISTENT_METADATA_FOR_BACKUP) \
     M(664, ACCESS_STORAGE_DOESNT_ALLOW_BACKUP) \
     \
     M(999, KEEPER_EXCEPTION) \
@@ -9,13 +9,13 @@ namespace ErrorCodes
     extern const int SYNTAX_ERROR;
 }

-Float64 IntervalKind::toAvgSeconds() const
+Int32 IntervalKind::toAvgSeconds() const
 {
     switch (kind)
     {
-        case IntervalKind::Nanosecond: return 0.000000001;
-        case IntervalKind::Microsecond: return 0.000001;
-        case IntervalKind::Millisecond: return 0.001;
+        case IntervalKind::Nanosecond:
+        case IntervalKind::Microsecond:
+        case IntervalKind::Millisecond: return 0; /// fractional parts of seconds have 0 seconds
         case IntervalKind::Second: return 1;
         case IntervalKind::Minute: return 60;
         case IntervalKind::Hour: return 3600;
@@ -28,25 +28,6 @@ Float64 IntervalKind::toAvgSeconds() const
     __builtin_unreachable();
 }

-bool IntervalKind::isFixedLength() const
-{
-    switch (kind)
-    {
-        case IntervalKind::Nanosecond:
-        case IntervalKind::Microsecond:
-        case IntervalKind::Millisecond:
-        case IntervalKind::Second:
-        case IntervalKind::Minute:
-        case IntervalKind::Hour:
-        case IntervalKind::Day:
-        case IntervalKind::Week: return true;
-        case IntervalKind::Month:
-        case IntervalKind::Quarter:
-        case IntervalKind::Year: return false;
-    }
-    __builtin_unreachable();
-}
-
 IntervalKind IntervalKind::fromAvgSeconds(Int64 num_seconds)
 {
     if (num_seconds)
@@ -31,15 +31,12 @@ struct IntervalKind

     /// Returns number of seconds in one interval.
     /// For `Month`, `Quarter` and `Year` the function returns an average number of seconds.
-    Float64 toAvgSeconds() const;
+    Int32 toAvgSeconds() const;

     /// Chooses an interval kind based on number of seconds.
     /// For example, `IntervalKind::fromAvgSeconds(3600)` returns `IntervalKind::Hour`.
     static IntervalKind fromAvgSeconds(Int64 num_seconds);

-    /// Returns whether IntervalKind has a fixed number of seconds (e.g. Day) or non-fixed (e.g. Month).
-    bool isFixedLength() const;
-
     /// Returns an uppercased version of what `toString()` returns.
     const char * toKeyword() const;

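The narrowing from `Float64` to `Int32` works because every kind now maps to a whole number of seconds, with the sub-second kinds collapsing to 0. A self-contained sketch of the resulting table; the constants for `Day` and coarser are the usual calendar averages and are assumed here, since those hunk lines are not part of this excerpt:

#include <cstdint>

enum class Kind { Nanosecond, Microsecond, Millisecond, Second, Minute, Hour,
                  Day, Week, Month, Quarter, Year };

int32_t toAvgSeconds(Kind kind)
{
    switch (kind)
    {
        case Kind::Nanosecond:
        case Kind::Microsecond:
        case Kind::Millisecond: return 0;  /// fractional parts of seconds have 0 seconds
        case Kind::Second: return 1;
        case Kind::Minute: return 60;
        case Kind::Hour: return 3600;
        case Kind::Day: return 86400;         /// assumed: not shown in this excerpt
        case Kind::Week: return 604800;       /// assumed: not shown in this excerpt
        case Kind::Month: return 2629746;     /// average month, assumed
        case Kind::Quarter: return 7889238;   /// average quarter, assumed
        case Kind::Year: return 31556952;     /// average Gregorian year, assumed
    }
    return 0;   /// unreachable with a valid kind
}

Even the largest value, a year at roughly 3.16e7 seconds, fits comfortably in `Int32`.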
@@ -826,66 +826,44 @@ using UTF8CaseInsensitiveStringSearcher = StringSearcher<false, false>;
 using ASCIICaseSensitiveTokenSearcher = TokenSearcher<ASCIICaseSensitiveStringSearcher>;
 using ASCIICaseInsensitiveTokenSearcher = TokenSearcher<ASCIICaseInsensitiveStringSearcher>;

-/** Uses functions from libc.
-  * It makes sense to use only with short haystacks when cheap initialization is required.
-  * There is no option for case-insensitive search for UTF-8 strings.
-  * It is required that strings are zero-terminated.
-  */
-
-struct LibCASCIICaseSensitiveStringSearcher : public StringSearcherBase
+/// Use only with short haystacks where cheap initialization is required.
+template <bool CaseInsensitive>
+struct StdLibASCIIStringSearcher : public StringSearcherBase
 {
-    const char * const needle;
+    const char * const needle_start;
+    const char * const needle_end;

     template <typename CharT>
     requires (sizeof(CharT) == 1)
-    LibCASCIICaseSensitiveStringSearcher(const CharT * const needle_, const size_t /* needle_size */)
-        : needle(reinterpret_cast<const char *>(needle_)) {}
+    StdLibASCIIStringSearcher(const CharT * const needle_start_, const size_t needle_size_)
+        : needle_start{reinterpret_cast<const char *>(needle_start_)}
+        , needle_end{reinterpret_cast<const char *>(needle_start) + needle_size_}
+    {}

     template <typename CharT>
     requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, const CharT * const haystack_end) const
+    const CharT * search(const CharT * haystack_start, const CharT * const haystack_end) const
     {
-        const auto * res = strstr(reinterpret_cast<const char *>(haystack), reinterpret_cast<const char *>(needle));
-        if (!res)
-            return haystack_end;
-        return reinterpret_cast<const CharT *>(res);
+        if constexpr (CaseInsensitive)
+        {
+            return std::search(
+                haystack_start, haystack_end, needle_start, needle_end,
+                [](char c1, char c2) {return std::toupper(c1) == std::toupper(c2);});
+        }
+        else
+        {
+            return std::search(
+                haystack_start, haystack_end, needle_start, needle_end,
+                [](char c1, char c2) {return c1 == c2;});
+        }
     }

     template <typename CharT>
     requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, const size_t haystack_size) const
+    const CharT * search(const CharT * haystack_start, const size_t haystack_length) const
     {
-        return search(haystack, haystack + haystack_size);
+        return search(haystack_start, haystack_start + haystack_length);
     }
 };

-struct LibCASCIICaseInsensitiveStringSearcher : public StringSearcherBase
-{
-    const char * const needle;
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    LibCASCIICaseInsensitiveStringSearcher(const CharT * const needle_, const size_t /* needle_size */)
-        : needle(reinterpret_cast<const char *>(needle_)) {}
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, const CharT * const haystack_end) const
-    {
-        const auto * res = strcasestr(reinterpret_cast<const char *>(haystack), reinterpret_cast<const char *>(needle));
-        if (!res)
-            return haystack_end;
-        return reinterpret_cast<const CharT *>(res);
-    }
-
-    template <typename CharT>
-    requires (sizeof(CharT) == 1)
-    const CharT * search(const CharT * haystack, const size_t haystack_size) const
-    {
-        return search(haystack, haystack + haystack_size);
-    }
-};
-
-
 }
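The replacement drops the libc `strstr`/`strcasestr` pair in favour of a single `std::search`-based template, which removes the zero-termination requirement: the search works on a [begin, end) range, and a miss returns `haystack_end` directly from `std::search`. A minimal standalone equivalent of the case-insensitive path; the `unsigned char` cast is added here because calling `std::toupper` on a negative `char` is undefined behaviour:

#include <algorithm>
#include <cctype>

/// What StdLibASCIIStringSearcher<true> boils down to: std::search over a
/// [begin, end) range with an ASCII case-folding comparator.
const char * findCaseInsensitive(
    const char * haystack_start, const char * haystack_end,
    const char * needle_start, const char * needle_end)
{
    return std::search(
        haystack_start, haystack_end, needle_start, needle_end,
        [](char c1, char c2)
        {
            return std::toupper(static_cast<unsigned char>(c1))
                == std::toupper(static_cast<unsigned char>(c2));
        });
}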
@@ -22,13 +22,14 @@ namespace ErrorCodes
     extern const int EMPTY_DATA_PASSED;
 }

-DataTypePtr FieldToDataType::operator() (const Null &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Null &) const
 {
     return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
 }

-DataTypePtr FieldToDataType::operator() (const UInt64 & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const UInt64 & x) const
 {
     if (x <= std::numeric_limits<UInt8>::max()) return std::make_shared<DataTypeUInt8>();
     if (x <= std::numeric_limits<UInt16>::max()) return std::make_shared<DataTypeUInt16>();
@@ -36,7 +37,8 @@ DataTypePtr FieldToDataType::operator() (const UInt64 & x) const
     return std::make_shared<DataTypeUInt64>();
 }

-DataTypePtr FieldToDataType::operator() (const Int64 & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Int64 & x) const
 {
     if (x <= std::numeric_limits<Int8>::max() && x >= std::numeric_limits<Int8>::min()) return std::make_shared<DataTypeInt8>();
     if (x <= std::numeric_limits<Int16>::max() && x >= std::numeric_limits<Int16>::min()) return std::make_shared<DataTypeInt16>();
@@ -44,77 +46,90 @@ DataTypePtr FieldToDataType::operator() (const Int64 & x) const
     return std::make_shared<DataTypeInt64>();
 }

-DataTypePtr FieldToDataType::operator() (const Float64 &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Float64 &) const
 {
     return std::make_shared<DataTypeFloat64>();
 }

-DataTypePtr FieldToDataType::operator() (const UInt128 &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const UInt128 &) const
 {
     return std::make_shared<DataTypeUInt128>();
 }

-DataTypePtr FieldToDataType::operator() (const Int128 &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Int128 &) const
 {
     return std::make_shared<DataTypeInt128>();
 }

-DataTypePtr FieldToDataType::operator() (const UInt256 &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const UInt256 &) const
 {
     return std::make_shared<DataTypeUInt256>();
 }

-DataTypePtr FieldToDataType::operator() (const Int256 &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Int256 &) const
 {
     return std::make_shared<DataTypeInt256>();
 }

-DataTypePtr FieldToDataType::operator() (const UUID &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const UUID &) const
 {
     return std::make_shared<DataTypeUUID>();
 }

-DataTypePtr FieldToDataType::operator() (const String &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const String &) const
 {
     return std::make_shared<DataTypeString>();
 }

-DataTypePtr FieldToDataType::operator() (const DecimalField<Decimal32> & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const DecimalField<Decimal32> & x) const
 {
     using Type = DataTypeDecimal<Decimal32>;
     return std::make_shared<Type>(Type::maxPrecision(), x.getScale());
 }

-DataTypePtr FieldToDataType::operator() (const DecimalField<Decimal64> & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const DecimalField<Decimal64> & x) const
 {
     using Type = DataTypeDecimal<Decimal64>;
     return std::make_shared<Type>(Type::maxPrecision(), x.getScale());
 }

-DataTypePtr FieldToDataType::operator() (const DecimalField<Decimal128> & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const DecimalField<Decimal128> & x) const
 {
     using Type = DataTypeDecimal<Decimal128>;
     return std::make_shared<Type>(Type::maxPrecision(), x.getScale());
 }

-DataTypePtr FieldToDataType::operator() (const DecimalField<Decimal256> & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const DecimalField<Decimal256> & x) const
 {
     using Type = DataTypeDecimal<Decimal256>;
     return std::make_shared<Type>(Type::maxPrecision(), x.getScale());
 }

-DataTypePtr FieldToDataType::operator() (const Array & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Array & x) const
 {
     DataTypes element_types;
     element_types.reserve(x.size());

     for (const Field & elem : x)
-        element_types.emplace_back(applyVisitor(FieldToDataType(allow_convertion_to_string), elem));
+        element_types.emplace_back(applyVisitor(*this, elem));

-    return std::make_shared<DataTypeArray>(getLeastSupertype(element_types, allow_convertion_to_string));
+    return std::make_shared<DataTypeArray>(getLeastSupertype<on_error>(element_types));
 }

-DataTypePtr FieldToDataType::operator() (const Tuple & tuple) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Tuple & tuple) const
 {
     if (tuple.empty())
         throw Exception("Cannot infer type of an empty tuple", ErrorCodes::EMPTY_DATA_PASSED);
@@ -123,12 +138,13 @@ DataTypePtr FieldToDataType::operator() (const Tuple & tuple) const
     element_types.reserve(tuple.size());

     for (const auto & element : tuple)
-        element_types.push_back(applyVisitor(FieldToDataType(allow_convertion_to_string), element));
+        element_types.push_back(applyVisitor(*this, element));

     return std::make_shared<DataTypeTuple>(element_types);
 }

-DataTypePtr FieldToDataType::operator() (const Map & map) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Map & map) const
 {
     DataTypes key_types;
     DataTypes value_types;
@@ -139,30 +155,37 @@ DataTypePtr FieldToDataType::operator() (const Map & map) const
     {
         const auto & tuple = elem.safeGet<const Tuple &>();
         assert(tuple.size() == 2);
-        key_types.push_back(applyVisitor(FieldToDataType(allow_convertion_to_string), tuple[0]));
-        value_types.push_back(applyVisitor(FieldToDataType(allow_convertion_to_string), tuple[1]));
+        key_types.push_back(applyVisitor(*this, tuple[0]));
+        value_types.push_back(applyVisitor(*this, tuple[1]));
     }

     return std::make_shared<DataTypeMap>(
-        getLeastSupertype(key_types, allow_convertion_to_string),
-        getLeastSupertype(value_types, allow_convertion_to_string));
+        getLeastSupertype<on_error>(key_types),
+        getLeastSupertype<on_error>(value_types));
 }

-DataTypePtr FieldToDataType::operator() (const Object &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const Object &) const
 {
     /// TODO: Do we need different parameters for type Object?
     return std::make_shared<DataTypeObject>("json", false);
 }

-DataTypePtr FieldToDataType::operator() (const AggregateFunctionStateData & x) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator() (const AggregateFunctionStateData & x) const
 {
     const auto & name = static_cast<const AggregateFunctionStateData &>(x).name;
     return DataTypeFactory::instance().get(name);
 }

-DataTypePtr FieldToDataType::operator()(const bool &) const
+template <LeastSupertypeOnError on_error>
+DataTypePtr FieldToDataType<on_error>::operator()(const bool &) const
 {
     return DataTypeFactory::instance().get("Bool");
 }

+template class FieldToDataType<LeastSupertypeOnError::Throw>;
+template class FieldToDataType<LeastSupertypeOnError::String>;
+template class FieldToDataType<LeastSupertypeOnError::Null>;

 }
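With the class templated and explicitly instantiated for all three policies, callers select the error behaviour at compile time instead of passing a flag through every recursive call. A hedged usage sketch against the headers touched by this diff (not verified against the full tree):

#include <Common/FieldVisitors.h>
#include <Core/Field.h>
#include <DataTypes/FieldToDataType.h>

using namespace DB;

/// Infer a data type for a Field; elements without a least supertype fall
/// back to String instead of throwing.
DataTypePtr inferTypeOrString(const Field & field)
{
    return applyVisitor(FieldToDataType<LeastSupertypeOnError::String>(), field);
}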
@@ -4,6 +4,7 @@
 #include <Core/Types.h>
 #include <Core/Field.h>
 #include <Common/FieldVisitors.h>
+#include <DataTypes/getLeastSupertype.h>


 namespace DB
@@ -17,14 +18,10 @@ using DataTypePtr = std::shared_ptr<const IDataType>;
   * Note that you still have to convert Field to corresponding data type before inserting to columns
   * (for example, this is necessary to convert elements of Array to common type).
   */
+template <LeastSupertypeOnError on_error = LeastSupertypeOnError::Throw>
 class FieldToDataType : public StaticVisitor<DataTypePtr>
 {
 public:
-    FieldToDataType(bool allow_convertion_to_string_ = false)
-        : allow_convertion_to_string(allow_convertion_to_string_)
-    {
-    }
-
     DataTypePtr operator() (const Null & x) const;
     DataTypePtr operator() (const UInt64 & x) const;
     DataTypePtr operator() (const UInt128 & x) const;
@@ -45,9 +42,6 @@ public:
     DataTypePtr operator() (const UInt256 & x) const;
     DataTypePtr operator() (const Int256 & x) const;
     DataTypePtr operator() (const bool & x) const;
-
-private:
-    bool allow_convertion_to_string;
 };

 }
@@ -565,4 +565,31 @@ class DataTypeEnum;

 template <typename T> inline constexpr bool IsDataTypeEnum<DataTypeEnum<T>> = true;

+#define FOR_BASIC_NUMERIC_TYPES(M) \
+    M(UInt8) \
+    M(UInt16) \
+    M(UInt32) \
+    M(UInt64) \
+    M(Int8) \
+    M(Int16) \
+    M(Int32) \
+    M(Int64) \
+    M(Float32) \
+    M(Float64)
+
+#define FOR_NUMERIC_TYPES(M) \
+    M(UInt8) \
+    M(UInt16) \
+    M(UInt32) \
+    M(UInt64) \
+    M(UInt128) \
+    M(UInt256) \
+    M(Int8) \
+    M(Int16) \
+    M(Int32) \
+    M(Int64) \
+    M(Int128) \
+    M(Int256) \
+    M(Float32) \
+    M(Float64)
 }
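These are X-macro lists: a caller defines the `M` callback, expands the list once per type, and undefines the callback afterwards. A standalone illustration with plain C++ types (the names here are demo-only, not the ClickHouse macros):

#include <cstdio>

#define FOR_DEMO_NUMERIC_TYPES(M) \
    M(int) \
    M(long) \
    M(float) \
    M(double)

/// The callback is instantiated once per list entry.
#define PRINT_SIZE(TYPE) std::printf(#TYPE ": %zu bytes\n", sizeof(TYPE));

int main()
{
    FOR_DEMO_NUMERIC_TYPES(PRINT_SIZE)
}

#undef PRINT_SIZE
#undef FOR_DEMO_NUMERIC_TYPES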
@@ -1,8 +1,10 @@
 #include <cstring>
+#include <memory>

 #include <Common/typeid_cast.h>
 #include <Common/assert_cast.h>
 #include <Common/StringUtils/StringUtils.h>
+#include "Columns/IColumn.h"

 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeTuple.h>
@@ -17,7 +19,6 @@

 #include <boost/algorithm/string/case_conv.hpp>

-
 namespace DB
 {

@@ -76,8 +77,7 @@ Block flatten(const Block & block)

     for (const auto & elem : block)
     {
-        const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(elem.type.get());
-        if (type_arr)
+        if (const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(elem.type.get()))
         {
             const DataTypeTuple * type_tuple = typeid_cast<const DataTypeTuple *>(type_arr->getNestedType().get());
             if (type_tuple && type_tuple->haveExplicitNames())
@@ -106,7 +106,7 @@ Block flatten(const Block & block)
                     res.insert(ColumnWithTypeAndName(
                         is_const
                             ? ColumnConst::create(std::move(column_array_of_element), block.rows())
-                            : std::move(column_array_of_element),
+                            : column_array_of_element,
                         std::make_shared<DataTypeArray>(element_types[i]),
                         nested_name));
                 }
@@ -114,6 +114,28 @@ Block flatten(const Block & block)
             else
                 res.insert(elem);
         }
+        else if (const DataTypeTuple * type_tuple = typeid_cast<const DataTypeTuple *>(elem.type.get()))
+        {
+            if (type_tuple->haveExplicitNames())
+            {
+                const DataTypes & element_types = type_tuple->getElements();
+                const Strings & names = type_tuple->getElementNames();
+                const ColumnTuple * column_tuple;
+                if (isColumnConst(*elem.column))
+                    column_tuple = typeid_cast<const ColumnTuple *>(&assert_cast<const ColumnConst &>(*elem.column).getDataColumn());
+                else
+                    column_tuple = typeid_cast<const ColumnTuple *>(elem.column.get());
+                size_t tuple_size = column_tuple->tupleSize();
+                for (size_t i = 0; i < tuple_size; ++i)
+                {
+                    const auto & element_column = column_tuple->getColumn(i);
+                    String nested_name = concatenateName(elem.name, names[i]);
+                    res.insert(ColumnWithTypeAndName(element_column.getPtr(), element_types[i], nested_name));
+                }
+            }
+            else
+                res.insert(elem);
+        }
         else
             res.insert(elem);
     }
@@ -243,7 +265,71 @@ std::unordered_set<String> getAllTableNames(const Block & block, bool to_lower_c
     }
     return nested_table_names;
 }

 }

+NestedColumnExtractHelper::NestedColumnExtractHelper(const Block & block_, bool case_insentive_)
+    : block(block_)
+    , case_insentive(case_insentive_)
+{}
+
+std::optional<ColumnWithTypeAndName> NestedColumnExtractHelper::extractColumn(const String & column_name)
+{
+    if (block.has(column_name, case_insentive))
+        return {block.getByName(column_name, case_insentive)};
+
+    auto nested_names = Nested::splitName(column_name);
+    if (case_insentive)
+    {
+        boost::to_lower(nested_names.first);
+        boost::to_lower(nested_names.second);
+    }
+    if (!block.has(nested_names.first, case_insentive))
+        return {};
+
+    if (!nested_tables.contains(nested_names.first))
+    {
+        ColumnsWithTypeAndName columns = {block.getByName(nested_names.first, case_insentive)};
+        nested_tables[nested_names.first] = std::make_shared<Block>(Nested::flatten(columns));
+    }
+
+    return extractColumn(column_name, nested_names.first, nested_names.second);
+}
+
+std::optional<ColumnWithTypeAndName> NestedColumnExtractHelper::extractColumn(
+    const String & original_column_name, const String & column_name_prefix, const String & column_name_suffix)
+{
+    auto table_iter = nested_tables.find(column_name_prefix);
+    if (table_iter == nested_tables.end())
+    {
+        return {};
+    }
+
+    auto & nested_table = table_iter->second;
+    auto nested_names = Nested::splitName(column_name_suffix);
+    auto new_column_name_prefix = Nested::concatenateName(column_name_prefix, nested_names.first);
+    if (nested_names.second.empty())
+    {
+        if (auto * column_ref = nested_table->findByName(new_column_name_prefix, case_insentive))
+        {
+            ColumnWithTypeAndName column = *column_ref;
+            if (case_insentive)
+                column.name = original_column_name;
+            return {std::move(column)};
+        }
+        else
+        {
+            return {};
+        }
+    }
+
+    if (!nested_table->has(new_column_name_prefix, case_insentive))
+    {
+        return {};
+    }
+
+    ColumnsWithTypeAndName columns = {nested_table->getByName(new_column_name_prefix, case_insentive)};
+    Block sub_block(columns);
+    nested_tables[new_column_name_prefix] = std::make_shared<Block>(Nested::flatten(sub_block));
+    return extractColumn(original_column_name, new_column_name_prefix, nested_names.second);
+}
 }
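The new `else if` branch extends `flatten` from `Array(Tuple(...))` columns to plain named `Tuple` columns, reusing the same `concatenateName` naming rule. A trivial standalone sketch of the resulting column names (simplified stand-in, not `Nested::concatenateName` itself):

#include <cassert>
#include <string>

/// Simplified naming rule used by flatten(): a named-Tuple column t(x, y)
/// yields t.x and t.y; an Array(Tuple(x, y)) column a yields a.x and a.y
/// with Array element types.
std::string concatenateName(const std::string & table, const std::string & element)
{
    return table + "." + element;
}

int main()
{
    assert(concatenateName("t", "x") == "t.x");
    assert(concatenateName("a", "y") == "a.y");
}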
@@ -18,8 +18,9 @@ namespace Nested
     /// Returns the prefix of the name to the first '.'. Or the name is unchanged if there is no dot.
     std::string extractTableName(const std::string & nested_name);

-    /// Replace Array(Tuple(...)) columns to a multiple of Array columns in a form of `column_name.element_name`.
-    /// only for named tuples that actually represent Nested structures.
+    /// Flatten a column of nested type into columns:
+    /// 1) For a named Tuple t Tuple(x ..., y ..., ...), replace it with columns t.x ..., t.y ..., ...
+    /// 2) For an Array with a named Tuple element column a Array(Tuple(x ..., y ..., ...)), replace it with multiple Array columns a.x ..., a.y ..., ...
     Block flatten(const Block & block);

     /// Collect Array columns in a form of `column_name.element_name` to single Array(Tuple(...)) column.
@@ -35,4 +36,20 @@ namespace Nested
     std::unordered_set<String> getAllTableNames(const Block & block, bool to_lower_case = false);
 }

+/// Use this class to extract element columns from columns of nested type in a block, e.g. a named Tuple.
+/// It can extract a column from a multiply nested type column, e.g. a named Tuple inside a named Tuple.
+/// Keeps some intermediate data to avoid rebuilding it multiple times.
+class NestedColumnExtractHelper
+{
+public:
+    explicit NestedColumnExtractHelper(const Block & block_, bool case_insentive_);
+    std::optional<ColumnWithTypeAndName> extractColumn(const String & column_name);
+private:
+    std::optional<ColumnWithTypeAndName>
+    extractColumn(const String & original_column_name, const String & column_name_prefix, const String & column_name_suffix);
+    const Block & block;
+    bool case_insentive;
+    std::map<String, BlockPtr> nested_tables;
+};

 }
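A hedged usage sketch of the new helper (ClickHouse internal API; include paths and the column name are assumed for illustration):

#include <optional>

#include <Core/Block.h>
#include <DataTypes/NestedUtils.h>

using namespace DB;

/// Resolve a dotted path such as "n.a.b" against a block that stores the
/// data as a single nested column "n"; returns an empty optional on a miss.
std::optional<ColumnWithTypeAndName> resolveNested(const Block & block)
{
    NestedColumnExtractHelper extractor(block, /* case_insentive = */ false);
    return extractor.extractColumn("n.a.b");
}

The helper caches each flattened level in `nested_tables`, so repeated lookups under the same prefix reuse the already-flattened block.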
Some files were not shown because too many files have changed in this diff.