Merge branch 'master' into urlCluster

This commit is contained in:
Kruglov Pavel 2023-05-22 14:59:34 +02:00 committed by GitHub
commit b5cad024e0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
318 changed files with 6023 additions and 2066 deletions

View File

@ -102,6 +102,17 @@ if (ENABLE_FUZZING)
set (ENABLE_PROTOBUF 1)
endif()
option (ENABLE_WOBOQ_CODEBROWSER "Build for woboq codebrowser" OFF)
if (ENABLE_WOBOQ_CODEBROWSER)
set (ENABLE_EMBEDDED_COMPILER 0)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-poison-system-directories")
# woboq codebrowser uses clang tooling, which may add the default system
# clang includes on its own, and clang will later warn about those
# self-added includes.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-poison-system-directories")
endif()
# Global libraries
# See:
# - default_libs.cmake
@ -259,8 +270,8 @@ endif ()
option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})
if (ENABLE_BUILD_PATH_MAPPING)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
endif ()
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
@ -557,7 +568,7 @@ if (NATIVE_BUILD_TARGETS
)
message (STATUS "Building native targets...")
set (NATIVE_BUILD_DIR "${CMAKE_BINARY_DIR}/native")
set (NATIVE_BUILD_DIR "${PROJECT_BINARY_DIR}/native")
execute_process(
COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
@ -571,7 +582,7 @@ if (NATIVE_BUILD_TARGETS
# Avoid overriding .cargo/config.toml with native toolchain.
"-DENABLE_RUST=OFF"
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
${CMAKE_SOURCE_DIR}
${PROJECT_SOURCE_DIR}
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
COMMAND_ECHO STDOUT)

View File

@ -3,6 +3,7 @@
#include <cassert>
#include <stdexcept> // for std::logic_error
#include <string>
#include <type_traits>
#include <vector>
#include <functional>
#include <iosfwd>
@ -326,5 +327,16 @@ namespace ZeroTraits
inline void set(StringRef & x) { x.size = 0; }
}
namespace PackedZeroTraits
{
template <typename Second, template <typename, typename> class PackedPairNoInit>
inline bool check(const PackedPairNoInit<StringRef, Second> p)
{ return 0 == p.key.size; }
template <typename Second, template <typename, typename> class PackedPairNoInit>
inline void set(PackedPairNoInit<StringRef, Second> & p)
{ p.key.size = 0; }
}
std::ostream & operator<<(std::ostream & os, const StringRef & str);

View File

@ -5,11 +5,11 @@ if (NOT TARGET check)
if (CMAKE_CONFIGURATION_TYPES)
add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
--force-new-ctest-process --output-on-failure --build-config "$<CONFIGURATION>"
WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
else ()
add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
--force-new-ctest-process --output-on-failure
WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
endif ()
endif ()

View File

@ -5,14 +5,14 @@ if (Git_FOUND)
# Commit hash + whether the building workspace was dirty or not
execute_process(COMMAND
"${GIT_EXECUTABLE}" rev-parse HEAD
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_HASH
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
# Branch name
execute_process(COMMAND
"${GIT_EXECUTABLE}" rev-parse --abbrev-ref HEAD
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_BRANCH
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
@ -20,14 +20,14 @@ if (Git_FOUND)
SET(ENV{TZ} "UTC")
execute_process(COMMAND
"${GIT_EXECUTABLE}" log -1 --format=%ad --date=iso-local
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_DATE
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
# Subject of the commit
execute_process(COMMAND
"${GIT_EXECUTABLE}" log -1 --format=%s
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_COMMIT_SUBJECT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
@ -35,7 +35,7 @@ if (Git_FOUND)
execute_process(
COMMAND ${GIT_EXECUTABLE} status
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
else()
message(STATUS "Git could not be found.")
endif()

View File

@ -7,6 +7,6 @@ message (STATUS "compiler CXX = ${CMAKE_CXX_COMPILER} ${FULL_CXX_FLAGS}")
message (STATUS "LINKER_FLAGS = ${FULL_EXE_LINKER_FLAGS}")
# Reproducible builds
string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_C_FLAGS_NORMALIZED "${FULL_C_FLAGS}")
string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_CXX_FLAGS_NORMALIZED "${FULL_CXX_FLAGS}")
string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_EXE_LINKER_FLAGS_NORMALIZED "${FULL_EXE_LINKER_FLAGS}")
string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_C_FLAGS_NORMALIZED "${FULL_C_FLAGS}")
string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_CXX_FLAGS_NORMALIZED "${FULL_CXX_FLAGS}")
string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_EXE_LINKER_FLAGS_NORMALIZED "${FULL_EXE_LINKER_FLAGS}")

View File

@ -29,14 +29,14 @@ if (SANITIZE)
# Linking can fail due to relocation overflows (see #49145), caused by too big object files / libraries.
# Work around this with position-independent builds (-fPIC and -fpie), this is slightly slower than non-PIC/PIE but that's okay.
set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/msan_suppressions.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
elseif (SANITIZE STREQUAL "thread")
set (TSAN_FLAGS "-fsanitize=thread")
if (COMPILER_CLANG)
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt")
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/tsan_suppressions.txt")
endif()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
@ -54,7 +54,7 @@ if (SANITIZE)
set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
endif()
if (COMPILER_CLANG)
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/ubsan_suppressions.txt")
endif()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")

View File

@ -1,4 +1,4 @@
include(${CMAKE_SOURCE_DIR}/cmake/autogenerated_versions.txt)
include(${PROJECT_SOURCE_DIR}/cmake/autogenerated_versions.txt)
set(VERSION_EXTRA "" CACHE STRING "")
set(VERSION_TWEAK "" CACHE STRING "")

View File

@ -6,7 +6,7 @@ if (NOT ENABLE_AVRO)
return()
endif()
set(AVROCPP_ROOT_DIR "${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++")
set(AVROCPP_ROOT_DIR "${PROJECT_SOURCE_DIR}/contrib/avro/lang/c++")
set(AVROCPP_INCLUDE_DIR "${AVROCPP_ROOT_DIR}/api")
set(AVROCPP_SOURCE_DIR "${AVROCPP_ROOT_DIR}/impl")

View File

@ -18,7 +18,7 @@ endif()
# Need to use C++17 since the compilation is not possible with C++20 currently.
set (CMAKE_CXX_STANDARD 17)
set(CASS_ROOT_DIR ${CMAKE_SOURCE_DIR}/contrib/cassandra)
set(CASS_ROOT_DIR ${PROJECT_SOURCE_DIR}/contrib/cassandra)
set(CASS_SRC_DIR "${CASS_ROOT_DIR}/src")
set(CASS_INCLUDE_DIR "${CASS_ROOT_DIR}/include")

View File

@ -26,7 +26,7 @@ endif ()
# StorageSystemTimeZones.generated.cpp is autogenerated each time during a build
# data in this file will be used to populate the system.time_zones table, this is specific to OS_LINUX
# as the library that's built using embedded tzdata is also specific to OS_LINUX
set(SYSTEM_STORAGE_TZ_FILE "${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
set(SYSTEM_STORAGE_TZ_FILE "${PROJECT_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
# remove existing copies so that it's generated fresh on each build.
file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})

View File

@ -1,7 +1,7 @@
# This file is a modified version of contrib/libuv/CMakeLists.txt
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/libuv")
set (BINARY_DIR "${CMAKE_BINARY_DIR}/contrib/libuv")
set (SOURCE_DIR "${PROJECT_SOURCE_DIR}/contrib/libuv")
set (BINARY_DIR "${PROJECT_BINARY_DIR}/contrib/libuv")
set(uv_sources
src/fs-poll.c

View File

@ -15,7 +15,7 @@ endif()
# This is the LGPL libmariadb project.
set(CC_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/mariadb-connector-c)
set(CC_SOURCE_DIR ${PROJECT_SOURCE_DIR}/contrib/mariadb-connector-c)
set(CC_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(WITH_SSL ON)

View File

@ -1,4 +1,4 @@
set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy")
set (SOURCE_DIR "${PROJECT_SOURCE_DIR}/contrib/snappy")
if (ARCH_S390X)
set (SNAPPY_IS_BIG_ENDIAN 1)

View File

@ -1,4 +1,4 @@
set (SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/zlib-ng)
set (SOURCE_DIR ${PROJECT_SOURCE_DIR}/contrib/zlib-ng)
add_definitions(-DZLIB_COMPAT)
add_definitions(-DWITH_GZFILEOP)

View File

@ -15,7 +15,7 @@ nproc=$(($(nproc) + 2)) # increase parallelism
read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
mkdir -p "$BUILD_DIRECTORY" && cd "$BUILD_DIRECTORY"
cmake "$SOURCE_DIRECTORY" -DCMAKE_CXX_COMPILER="/usr/bin/clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="/usr/bin/clang-${LLVM_VERSION}" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 "${CMAKE_FLAGS[@]}"
cmake "$SOURCE_DIRECTORY" -DCMAKE_CXX_COMPILER="/usr/bin/clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="/usr/bin/clang-${LLVM_VERSION}" -DENABLE_WOBOQ_CODEBROWSER=ON "${CMAKE_FLAGS[@]}"
mkdir -p "$HTML_RESULT_DIRECTORY"
echo 'Filter out too noisy "Error: filename" lines and keep them in full codebrowser_generator.log'
/woboq_codebrowser/generator/codebrowser_generator -b "$BUILD_DIRECTORY" -a \

View File

@ -13,8 +13,8 @@ The PostgreSQL engine allows to perform `SELECT` and `INSERT` queries on data th
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
name1 type1 [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
name2 type2 [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
...
) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
```

View File

@ -0,0 +1,636 @@
---
slug: /en/getting-started/example-datasets/reddit-comments
sidebar_label: Reddit comments
---
# Reddit comments dataset
This dataset contains publicly available Reddit comments from December 2005 to March 2023, comprising over 7B rows of data. The raw data is in JSON format in compressed `.zst` files and the rows look like the following:
```json
{"controversiality":0,"body":"A look at Vietnam and Mexico exposes the myth of market liberalisation.","subreddit_id":"t5_6","link_id":"t3_17863","stickied":false,"subreddit":"reddit.com","score":2,"ups":2,"author_flair_css_class":null,"created_utc":1134365188,"author_flair_text":null,"author":"frjo","id":"c13","edited":false,"parent_id":"t3_17863","gilded":0,"distinguished":null,"retrieved_on":1473738411}
{"created_utc":1134365725,"author_flair_css_class":null,"score":1,"ups":1,"subreddit":"reddit.com","stickied":false,"link_id":"t3_17866","subreddit_id":"t5_6","controversiality":0,"body":"The site states \"What can I use it for? Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more...\", just like any other new breeed of sites that want us to store everything we have on the web. And they even guarantee multiple levels of security and encryption etc. But what prevents these web site operators fom accessing and/or stealing Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more, for competitive or personal gains...? I am pretty sure that most of them are honest, but what's there to prevent me from setting up a good useful site and stealing all your data? Call me paranoid - I am.","retrieved_on":1473738411,"distinguished":null,"gilded":0,"id":"c14","edited":false,"parent_id":"t3_17866","author":"zse7zse","author_flair_text":null}
{"gilded":0,"distinguished":null,"retrieved_on":1473738411,"author":"[deleted]","author_flair_text":null,"edited":false,"id":"c15","parent_id":"t3_17869","subreddit":"reddit.com","score":0,"ups":0,"created_utc":1134366848,"author_flair_css_class":null,"body":"Jython related topics by Frank Wierzbicki","controversiality":0,"subreddit_id":"t5_6","stickied":false,"link_id":"t3_17869"}
{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"[deleted]","edited":false,"parent_id":"t3_17870","id":"c16","subreddit":"reddit.com","created_utc":1134367660,"author_flair_css_class":null,"score":1,"ups":1,"body":"[deleted]","controversiality":0,"stickied":false,"link_id":"t3_17870","subreddit_id":"t5_6"}
{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"rjoseph","edited":false,"id":"c17","parent_id":"t3_17817","subreddit":"reddit.com","author_flair_css_class":null,"created_utc":1134367754,"score":1,"ups":1,"body":"Saft is by far the best extension you could tak onto your Safari","controversiality":0,"link_id":"t3_17817","stickied":false,"subreddit_id":"t5_6"}
```
A shoutout to Percona for the [motivation behind ingesting this dataset](https://www.percona.com/blog/big-data-set-reddit-comments-analyzing-clickhouse/), which we have downloaded and stored in an S3 bucket.
:::note
The following commands were executed on ClickHouse Cloud. To run this on your own cluster, replace `default` in the `s3Cluster` function call with the name of your cluster. If you do not have a cluster, then replace the `s3Cluster` function with the `s3` function.
:::
1. Let's create a table for the Reddit data:
```sql
CREATE TABLE reddit
(
subreddit LowCardinality(String),
subreddit_id LowCardinality(String),
subreddit_type Enum('public' = 1, 'restricted' = 2, 'user' = 3, 'archived' = 4, 'gold_restricted' = 5, 'private' = 6),
author LowCardinality(String),
body String CODEC(ZSTD(6)),
created_date Date DEFAULT toDate(created_utc),
created_utc DateTime,
retrieved_on DateTime,
id String,
parent_id String,
link_id String,
score Int32,
total_awards_received UInt16,
controversiality UInt8,
gilded UInt8,
collapsed_because_crowd_control UInt8,
collapsed_reason Enum('' = 0, 'comment score below threshold' = 1, 'may be sensitive content' = 2, 'potentially toxic' = 3, 'potentially toxic content' = 4),
distinguished Enum('' = 0, 'moderator' = 1, 'admin' = 2, 'special' = 3),
removal_reason Enum('' = 0, 'legal' = 1),
author_created_utc DateTime,
author_fullname LowCardinality(String),
author_patreon_flair UInt8,
author_premium UInt8,
can_gild UInt8,
can_mod_post UInt8,
collapsed UInt8,
is_submitter UInt8,
_edited String,
locked UInt8,
quarantined UInt8,
no_follow UInt8,
send_replies UInt8,
stickied UInt8,
author_flair_text LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (subreddit, created_date, author);
```
:::note
The names of the files in S3 start with `RC_YYYY-MM` where `YYYY-MM` goes from `2005-12` to `2023-02`. The compression changes a couple of times though, so the file extensions are not consistent. For example:
- the file names are initially `RC_2005-12.bz2` to `RC_2017-11.bz2`
- then they look like `RC_2017-12.xz` to `RC_2018-09.xz`
- and finally `RC_2018-10.zst` to `RC_2023-02.zst`
:::
2. We are going to start with one month of data, but if you want to simply insert every row - skip ahead to step 8 below. The following file has 86M records from December, 2017:
```sql
INSERT INTO reddit
SELECT *
FROM s3Cluster(
'default',
'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2017-12.xz',
'JSONEachRow'
);
```
If you do not have a cluster, use `s3` instead of `s3Cluster`:
```sql
INSERT INTO reddit
SELECT *
FROM s3(
'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2017-12.xz',
'JSONEachRow'
);
```
3. It will take a while depending on your resources, but when it's done verify it worked:
```sql
SELECT formatReadableQuantity(count())
FROM reddit;
```
```response
┌─formatReadableQuantity(count())─┐
│ 85.97 million │
└─────────────────────────────────┘
```
4. Let's see how many unique subreddits were in December of 2017:
```sql
SELECT uniqExact(subreddit)
FROM reddit;
```
```response
┌─uniqExact(subreddit)─┐
│ 91613 │
└──────────────────────┘
1 row in set. Elapsed: 1.572 sec. Processed 85.97 million rows, 367.43 MB (54.71 million rows/s., 233.80 MB/s.)
```
5. This query returns the top 20 subreddits (in terms of number of comments):
```sql
SELECT
subreddit,
count() AS c
FROM reddit
GROUP BY subreddit
ORDER BY c DESC
LIMIT 20;
```
```response
┌─subreddit───────┬───────c─┐
│ AskReddit │ 5245881 │
│ politics │ 1753120 │
│ nfl │ 1220266 │
│ nba │ 960388 │
│ The_Donald │ 931857 │
│ news │ 796617 │
│ worldnews │ 765709 │
│ CFB │ 710360 │
│ gaming │ 602761 │
│ movies │ 601966 │
│ soccer │ 590628 │
│ Bitcoin │ 583783 │
│ pics │ 563408 │
│ StarWars │ 562514 │
│ funny │ 547563 │
│ leagueoflegends │ 517213 │
│ teenagers │ 492020 │
│ DestinyTheGame │ 477377 │
│ todayilearned │ 472650 │
│ videos │ 450581 │
└─────────────────┴─────────┘
20 rows in set. Elapsed: 0.368 sec. Processed 85.97 million rows, 367.43 MB (233.34 million rows/s., 997.25 MB/s.)
```
6. Here are the top 10 authors in December of 2017, in terms of number of comments posted:
```sql
SELECT
author,
count() AS c
FROM reddit
GROUP BY author
ORDER BY c DESC
LIMIT 10;
```
```response
┌─author──────────┬───────c─┐
│ [deleted] │ 5913324 │
│ AutoModerator │ 784886 │
│ ImagesOfNetwork │ 83241 │
│ BitcoinAllBot │ 54484 │
│ imguralbumbot │ 45822 │
│ RPBot │ 29337 │
│ WikiTextBot │ 25982 │
│ Concise_AMA_Bot │ 19974 │
│ MTGCardFetcher │ 19103 │
│ TotesMessenger │ 19057 │
└─────────────────┴─────────┘
10 rows in set. Elapsed: 8.143 sec. Processed 85.97 million rows, 711.05 MB (10.56 million rows/s., 87.32 MB/s.)
```
7. We already inserted some data, but we will start over:
```sql
TRUNCATE TABLE reddit;
```
8. This is a fun dataset and it looks like we can find some great information, so let's go ahead and insert the entire dataset from 2005 to 2023. When you're ready, run this command to insert all the rows. (It takes a while - up to 17 hours!)
```sql
INSERT INTO reddit
SELECT *
FROM s3Cluster(
'default',
'https://clickhouse-public-datasets.s3.amazonaws.com/reddit/original/RC*',
'JSONEachRow'
)
SETTINGS zstd_window_log_max = 31;
```
The response looks like:
```response
0 rows in set. Elapsed: 61187.839 sec. Processed 6.74 billion rows, 2.06 TB (110.17 thousand rows/s., 33.68 MB/s.)
```
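While that long-running `INSERT` is in flight, you can watch its progress from another session. A rough sketch (the filter on the query text is only an assumption about how your statement begins):
```sql
SELECT
    elapsed,
    formatReadableQuantity(read_rows) AS rows_read,
    formatReadableSize(read_bytes) AS bytes_read
FROM system.processes
WHERE query ILIKE 'INSERT INTO reddit%';
```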
9. Let's see how many rows were inserted and how much disk space the table is using:
```sql
SELECT
sum(rows) AS count,
formatReadableQuantity(count),
formatReadableSize(sum(bytes)) AS disk_size,
formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size
FROM system.parts
WHERE (table = 'reddit') AND active
```
Notice that the compressed size on disk is about 1/3 of the uncompressed size:
```response
┌──────count─┬─formatReadableQuantity(sum(rows))─┬─disk_size──┬─uncompressed_size─┐
│ 6739503568 │ 6.74 billion │ 501.10 GiB │ 1.51 TiB │
└────────────┴───────────────────────────────────┴────────────┴───────────────────┘
1 row in set. Elapsed: 0.010 sec.
```
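To make that ratio explicit, the same `system.parts` columns can be combined into a single number (a quick sketch):
```sql
SELECT
    formatReadableSize(sum(bytes)) AS disk_size,
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size,
    round(sum(data_uncompressed_bytes) / sum(bytes), 2) AS compression_ratio
FROM system.parts
WHERE (table = 'reddit') AND active;
```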
10. The following query shows how many comments, authors and subreddits we have for each month:
```sql
SELECT
toStartOfMonth(created_utc) AS firstOfMonth,
count() AS c,
bar(c, 0, 50000000, 25) AS bar_count,
uniq(author) AS authors,
bar(authors, 0, 5000000, 25) AS bar_authors,
uniq(subreddit) AS subreddits,
bar(subreddits, 0, 100000, 25) AS bar_subreddits
FROM reddit
GROUP BY firstOfMonth
ORDER BY firstOfMonth ASC;
```
This is a substantial query that has to process all 6.74 billion rows, but we still get an impressive response time (about 3 minutes):
```response
┌─firstOfMonth─┬─────────c─┬─bar_count─────────────────┬─authors─┬─bar_authors───────────────┬─subreddits─┬─bar_subreddits────────────┐
│ 2005-12-01 │ 1075 │ │ 394 │ │ 1 │ │
│ 2006-01-01 │ 3666 │ │ 791 │ │ 2 │ │
│ 2006-02-01 │ 9095 │ │ 1464 │ │ 18 │ │
│ 2006-03-01 │ 13859 │ │ 1958 │ │ 15 │ │
│ 2006-04-01 │ 19090 │ │ 2334 │ │ 21 │ │
│ 2006-05-01 │ 26859 │ │ 2698 │ │ 21 │ │
│ 2006-06-01 │ 29163 │ │ 3043 │ │ 19 │ │
│ 2006-07-01 │ 37031 │ │ 3532 │ │ 22 │ │
│ 2006-08-01 │ 50559 │ │ 4750 │ │ 24 │ │
│ 2006-09-01 │ 50675 │ │ 4908 │ │ 21 │ │
│ 2006-10-01 │ 54148 │ │ 5654 │ │ 31 │ │
│ 2006-11-01 │ 62021 │ │ 6490 │ │ 23 │ │
│ 2006-12-01 │ 61018 │ │ 6707 │ │ 24 │ │
│ 2007-01-01 │ 81341 │ │ 7931 │ │ 23 │ │
│ 2007-02-01 │ 95634 │ │ 9020 │ │ 21 │ │
│ 2007-03-01 │ 112444 │ │ 10842 │ │ 23 │ │
│ 2007-04-01 │ 126773 │ │ 10701 │ │ 26 │ │
│ 2007-05-01 │ 170097 │ │ 11365 │ │ 25 │ │
│ 2007-06-01 │ 178800 │ │ 11267 │ │ 22 │ │
│ 2007-07-01 │ 203319 │ │ 12482 │ │ 25 │ │
│ 2007-08-01 │ 225111 │ │ 14124 │ │ 30 │ │
│ 2007-09-01 │ 259497 │ ▏ │ 15416 │ │ 33 │ │
│ 2007-10-01 │ 274170 │ ▏ │ 15302 │ │ 36 │ │
│ 2007-11-01 │ 372983 │ ▏ │ 15134 │ │ 43 │ │
│ 2007-12-01 │ 363390 │ ▏ │ 15915 │ │ 31 │ │
│ 2008-01-01 │ 452990 │ ▏ │ 18857 │ │ 126 │ │
│ 2008-02-01 │ 441768 │ ▏ │ 18266 │ │ 173 │ │
│ 2008-03-01 │ 463728 │ ▏ │ 18947 │ │ 292 │ │
│ 2008-04-01 │ 468317 │ ▏ │ 18590 │ │ 323 │ │
│ 2008-05-01 │ 536380 │ ▎ │ 20861 │ │ 375 │ │
│ 2008-06-01 │ 577684 │ ▎ │ 22557 │ │ 575 │ ▏ │
│ 2008-07-01 │ 592610 │ ▎ │ 23123 │ │ 657 │ ▏ │
│ 2008-08-01 │ 595959 │ ▎ │ 23729 │ │ 707 │ ▏ │
│ 2008-09-01 │ 680892 │ ▎ │ 26374 │ ▏ │ 801 │ ▏ │
│ 2008-10-01 │ 789874 │ ▍ │ 28970 │ ▏ │ 893 │ ▏ │
│ 2008-11-01 │ 792310 │ ▍ │ 30272 │ ▏ │ 1024 │ ▎ │
│ 2008-12-01 │ 850359 │ ▍ │ 34073 │ ▏ │ 1103 │ ▎ │
│ 2009-01-01 │ 1051649 │ ▌ │ 38978 │ ▏ │ 1316 │ ▎ │
│ 2009-02-01 │ 944711 │ ▍ │ 43390 │ ▏ │ 1132 │ ▎ │
│ 2009-03-01 │ 1048643 │ ▌ │ 46516 │ ▏ │ 1203 │ ▎ │
│ 2009-04-01 │ 1094599 │ ▌ │ 48284 │ ▏ │ 1334 │ ▎ │
│ 2009-05-01 │ 1201257 │ ▌ │ 52512 │ ▎ │ 1395 │ ▎ │
│ 2009-06-01 │ 1258750 │ ▋ │ 57728 │ ▎ │ 1473 │ ▎ │
│ 2009-07-01 │ 1470290 │ ▋ │ 60098 │ ▎ │ 1686 │ ▍ │
│ 2009-08-01 │ 1750688 │ ▉ │ 67347 │ ▎ │ 1777 │ ▍ │
│ 2009-09-01 │ 2032276 │ █ │ 78051 │ ▍ │ 1784 │ ▍ │
│ 2009-10-01 │ 2242017 │ █ │ 93409 │ ▍ │ 2071 │ ▌ │
│ 2009-11-01 │ 2207444 │ █ │ 95940 │ ▍ │ 2141 │ ▌ │
│ 2009-12-01 │ 2560510 │ █▎ │ 104239 │ ▌ │ 2141 │ ▌ │
│ 2010-01-01 │ 2884096 │ █▍ │ 114314 │ ▌ │ 2313 │ ▌ │
│ 2010-02-01 │ 2687779 │ █▎ │ 115683 │ ▌ │ 2522 │ ▋ │
│ 2010-03-01 │ 3228254 │ █▌ │ 125775 │ ▋ │ 2890 │ ▋ │
│ 2010-04-01 │ 3209898 │ █▌ │ 128936 │ ▋ │ 3170 │ ▊ │
│ 2010-05-01 │ 3267363 │ █▋ │ 131851 │ ▋ │ 3166 │ ▊ │
│ 2010-06-01 │ 3532867 │ █▊ │ 139522 │ ▋ │ 3301 │ ▊ │
│ 2010-07-01 │ 4032737 │ ██ │ 153451 │ ▊ │ 3662 │ ▉ │
│ 2010-08-01 │ 4247982 │ ██ │ 164071 │ ▊ │ 3653 │ ▉ │
│ 2010-09-01 │ 4704069 │ ██▎ │ 186613 │ ▉ │ 4009 │ █ │
│ 2010-10-01 │ 5032368 │ ██▌ │ 203800 │ █ │ 4154 │ █ │
│ 2010-11-01 │ 5689002 │ ██▊ │ 226134 │ █▏ │ 4383 │ █ │
│ 2010-12-01 │ 5972642 │ ██▉ │ 245824 │ █▏ │ 4692 │ █▏ │
│ 2011-01-01 │ 6603329 │ ███▎ │ 270025 │ █▎ │ 5141 │ █▎ │
│ 2011-02-01 │ 6363114 │ ███▏ │ 277593 │ █▍ │ 5202 │ █▎ │
│ 2011-03-01 │ 7556165 │ ███▊ │ 314748 │ █▌ │ 5445 │ █▎ │
│ 2011-04-01 │ 7571398 │ ███▊ │ 329920 │ █▋ │ 6128 │ █▌ │
│ 2011-05-01 │ 8803949 │ ████▍ │ 365013 │ █▊ │ 6834 │ █▋ │
│ 2011-06-01 │ 9766511 │ ████▉ │ 393945 │ █▉ │ 7519 │ █▉ │
│ 2011-07-01 │ 10557466 │ █████▎ │ 424235 │ ██ │ 8293 │ ██ │
│ 2011-08-01 │ 12316144 │ ██████▏ │ 475326 │ ██▍ │ 9657 │ ██▍ │
│ 2011-09-01 │ 12150412 │ ██████ │ 503142 │ ██▌ │ 10278 │ ██▌ │
│ 2011-10-01 │ 13470278 │ ██████▋ │ 548801 │ ██▋ │ 10922 │ ██▋ │
│ 2011-11-01 │ 13621533 │ ██████▊ │ 574435 │ ██▊ │ 11572 │ ██▉ │
│ 2011-12-01 │ 14509469 │ ███████▎ │ 622849 │ ███ │ 12335 │ ███ │
│ 2012-01-01 │ 16350205 │ ████████▏ │ 696110 │ ███▍ │ 14281 │ ███▌ │
│ 2012-02-01 │ 16015695 │ ████████ │ 722892 │ ███▌ │ 14949 │ ███▋ │
│ 2012-03-01 │ 17881943 │ ████████▉ │ 789664 │ ███▉ │ 15795 │ ███▉ │
│ 2012-04-01 │ 19044534 │ █████████▌ │ 842491 │ ████▏ │ 16440 │ ████ │
│ 2012-05-01 │ 20388260 │ ██████████▏ │ 886176 │ ████▍ │ 16974 │ ████▏ │
│ 2012-06-01 │ 21897913 │ ██████████▉ │ 946798 │ ████▋ │ 17952 │ ████▍ │
│ 2012-07-01 │ 24087517 │ ████████████ │ 1018636 │ █████ │ 19069 │ ████▊ │
│ 2012-08-01 │ 25703326 │ ████████████▊ │ 1094445 │ █████▍ │ 20553 │ █████▏ │
│ 2012-09-01 │ 23419524 │ ███████████▋ │ 1088491 │ █████▍ │ 20831 │ █████▏ │
│ 2012-10-01 │ 24788236 │ ████████████▍ │ 1131885 │ █████▋ │ 21868 │ █████▍ │
│ 2012-11-01 │ 24648302 │ ████████████▎ │ 1167608 │ █████▊ │ 21791 │ █████▍ │
│ 2012-12-01 │ 26080276 │ █████████████ │ 1218402 │ ██████ │ 22622 │ █████▋ │
│ 2013-01-01 │ 30365867 │ ███████████████▏ │ 1341703 │ ██████▋ │ 24696 │ ██████▏ │
│ 2013-02-01 │ 27213960 │ █████████████▌ │ 1304756 │ ██████▌ │ 24514 │ ██████▏ │
│ 2013-03-01 │ 30771274 │ ███████████████▍ │ 1391703 │ ██████▉ │ 25730 │ ██████▍ │
│ 2013-04-01 │ 33259557 │ ████████████████▋ │ 1485971 │ ███████▍ │ 27294 │ ██████▊ │
│ 2013-05-01 │ 33126225 │ ████████████████▌ │ 1506473 │ ███████▌ │ 27299 │ ██████▊ │
│ 2013-06-01 │ 32648247 │ ████████████████▎ │ 1506650 │ ███████▌ │ 27450 │ ██████▊ │
│ 2013-07-01 │ 34922133 │ █████████████████▍ │ 1561771 │ ███████▊ │ 28294 │ ███████ │
│ 2013-08-01 │ 34766579 │ █████████████████▍ │ 1589781 │ ███████▉ │ 28943 │ ███████▏ │
│ 2013-09-01 │ 31990369 │ ███████████████▉ │ 1570342 │ ███████▊ │ 29408 │ ███████▎ │
│ 2013-10-01 │ 35940040 │ █████████████████▉ │ 1683770 │ ████████▍ │ 30273 │ ███████▌ │
│ 2013-11-01 │ 37396497 │ ██████████████████▋ │ 1757467 │ ████████▊ │ 31173 │ ███████▊ │
│ 2013-12-01 │ 39810216 │ ███████████████████▉ │ 1846204 │ █████████▏ │ 32326 │ ████████ │
│ 2014-01-01 │ 42420655 │ █████████████████████▏ │ 1927229 │ █████████▋ │ 35603 │ ████████▉ │
│ 2014-02-01 │ 38703362 │ ███████████████████▎ │ 1874067 │ █████████▎ │ 37007 │ █████████▎ │
│ 2014-03-01 │ 42459956 │ █████████████████████▏ │ 1959888 │ █████████▊ │ 37948 │ █████████▍ │
│ 2014-04-01 │ 42440735 │ █████████████████████▏ │ 1951369 │ █████████▊ │ 38362 │ █████████▌ │
│ 2014-05-01 │ 42514094 │ █████████████████████▎ │ 1970197 │ █████████▊ │ 39078 │ █████████▊ │
│ 2014-06-01 │ 41990650 │ ████████████████████▉ │ 1943850 │ █████████▋ │ 38268 │ █████████▌ │
│ 2014-07-01 │ 46868899 │ ███████████████████████▍ │ 2059346 │ ██████████▎ │ 40634 │ ██████████▏ │
│ 2014-08-01 │ 46990813 │ ███████████████████████▍ │ 2117335 │ ██████████▌ │ 41764 │ ██████████▍ │
│ 2014-09-01 │ 44992201 │ ██████████████████████▍ │ 2124708 │ ██████████▌ │ 41890 │ ██████████▍ │
│ 2014-10-01 │ 47497520 │ ███████████████████████▋ │ 2206535 │ ███████████ │ 43109 │ ██████████▊ │
│ 2014-11-01 │ 46118074 │ ███████████████████████ │ 2239747 │ ███████████▏ │ 43718 │ ██████████▉ │
│ 2014-12-01 │ 48807699 │ ████████████████████████▍ │ 2372945 │ ███████████▊ │ 43823 │ ██████████▉ │
│ 2015-01-01 │ 53851542 │ █████████████████████████ │ 2499536 │ ████████████▍ │ 47172 │ ███████████▊ │
│ 2015-02-01 │ 48342747 │ ████████████████████████▏ │ 2448496 │ ████████████▏ │ 47229 │ ███████████▊ │
│ 2015-03-01 │ 54564441 │ █████████████████████████ │ 2550534 │ ████████████▊ │ 48156 │ ████████████ │
│ 2015-04-01 │ 55005780 │ █████████████████████████ │ 2609443 │ █████████████ │ 49865 │ ████████████▍ │
│ 2015-05-01 │ 54504410 │ █████████████████████████ │ 2585535 │ ████████████▉ │ 50137 │ ████████████▌ │
│ 2015-06-01 │ 54258492 │ █████████████████████████ │ 2595129 │ ████████████▉ │ 49598 │ ████████████▍ │
│ 2015-07-01 │ 58451788 │ █████████████████████████ │ 2720026 │ █████████████▌ │ 55022 │ █████████████▊ │
│ 2015-08-01 │ 58075327 │ █████████████████████████ │ 2743994 │ █████████████▋ │ 55302 │ █████████████▊ │
│ 2015-09-01 │ 55574825 │ █████████████████████████ │ 2672793 │ █████████████▎ │ 53960 │ █████████████▍ │
│ 2015-10-01 │ 59494045 │ █████████████████████████ │ 2816426 │ ██████████████ │ 70210 │ █████████████████▌ │
│ 2015-11-01 │ 57117500 │ █████████████████████████ │ 2847146 │ ██████████████▏ │ 71363 │ █████████████████▊ │
│ 2015-12-01 │ 58523312 │ █████████████████████████ │ 2854840 │ ██████████████▎ │ 94559 │ ███████████████████████▋ │
│ 2016-01-01 │ 61991732 │ █████████████████████████ │ 2920366 │ ██████████████▌ │ 108438 │ █████████████████████████ │
│ 2016-02-01 │ 59189875 │ █████████████████████████ │ 2854683 │ ██████████████▎ │ 109916 │ █████████████████████████ │
│ 2016-03-01 │ 63918864 │ █████████████████████████ │ 2969542 │ ██████████████▊ │ 84787 │ █████████████████████▏ │
│ 2016-04-01 │ 64271256 │ █████████████████████████ │ 2999086 │ ██████████████▉ │ 61647 │ ███████████████▍ │
│ 2016-05-01 │ 65212004 │ █████████████████████████ │ 3034674 │ ███████████████▏ │ 67465 │ ████████████████▊ │
│ 2016-06-01 │ 65867743 │ █████████████████████████ │ 3057604 │ ███████████████▎ │ 75170 │ ██████████████████▊ │
│ 2016-07-01 │ 66974735 │ █████████████████████████ │ 3199374 │ ███████████████▉ │ 77732 │ ███████████████████▍ │
│ 2016-08-01 │ 69654819 │ █████████████████████████ │ 3239957 │ ████████████████▏ │ 63080 │ ███████████████▊ │
│ 2016-09-01 │ 67024973 │ █████████████████████████ │ 3190864 │ ███████████████▉ │ 62324 │ ███████████████▌ │
│ 2016-10-01 │ 71826553 │ █████████████████████████ │ 3284340 │ ████████████████▍ │ 62549 │ ███████████████▋ │
│ 2016-11-01 │ 71022319 │ █████████████████████████ │ 3300822 │ ████████████████▌ │ 69718 │ █████████████████▍ │
│ 2016-12-01 │ 72942967 │ █████████████████████████ │ 3430324 │ █████████████████▏ │ 71705 │ █████████████████▉ │
│ 2017-01-01 │ 78946585 │ █████████████████████████ │ 3572093 │ █████████████████▊ │ 78198 │ ███████████████████▌ │
│ 2017-02-01 │ 70609487 │ █████████████████████████ │ 3421115 │ █████████████████ │ 69823 │ █████████████████▍ │
│ 2017-03-01 │ 79723106 │ █████████████████████████ │ 3638122 │ ██████████████████▏ │ 73865 │ ██████████████████▍ │
│ 2017-04-01 │ 77478009 │ █████████████████████████ │ 3620591 │ ██████████████████ │ 74387 │ ██████████████████▌ │
│ 2017-05-01 │ 79810360 │ █████████████████████████ │ 3650820 │ ██████████████████▎ │ 74356 │ ██████████████████▌ │
│ 2017-06-01 │ 79901711 │ █████████████████████████ │ 3737614 │ ██████████████████▋ │ 72114 │ ██████████████████ │
│ 2017-07-01 │ 81798725 │ █████████████████████████ │ 3872330 │ ███████████████████▎ │ 76052 │ ███████████████████ │
│ 2017-08-01 │ 84658503 │ █████████████████████████ │ 3960093 │ ███████████████████▊ │ 77798 │ ███████████████████▍ │
│ 2017-09-01 │ 83165192 │ █████████████████████████ │ 3880501 │ ███████████████████▍ │ 78402 │ ███████████████████▌ │
│ 2017-10-01 │ 85828912 │ █████████████████████████ │ 3980335 │ ███████████████████▉ │ 80685 │ ████████████████████▏ │
│ 2017-11-01 │ 84965681 │ █████████████████████████ │ 4026749 │ ████████████████████▏ │ 82659 │ ████████████████████▋ │
│ 2017-12-01 │ 85973810 │ █████████████████████████ │ 4196354 │ ████████████████████▉ │ 91984 │ ██████████████████████▉ │
│ 2018-01-01 │ 91558594 │ █████████████████████████ │ 4364443 │ █████████████████████▊ │ 102577 │ █████████████████████████ │
│ 2018-02-01 │ 86467179 │ █████████████████████████ │ 4277899 │ █████████████████████▍ │ 104610 │ █████████████████████████ │
│ 2018-03-01 │ 96490262 │ █████████████████████████ │ 4422470 │ ██████████████████████ │ 112559 │ █████████████████████████ │
│ 2018-04-01 │ 98101232 │ █████████████████████████ │ 4572434 │ ██████████████████████▊ │ 105284 │ █████████████████████████ │
│ 2018-05-01 │ 100109100 │ █████████████████████████ │ 4698908 │ ███████████████████████▍ │ 103910 │ █████████████████████████ │
│ 2018-06-01 │ 100009462 │ █████████████████████████ │ 4697426 │ ███████████████████████▍ │ 101107 │ █████████████████████████ │
│ 2018-07-01 │ 108151359 │ █████████████████████████ │ 5099492 │ █████████████████████████ │ 106184 │ █████████████████████████ │
│ 2018-08-01 │ 107330940 │ █████████████████████████ │ 5084082 │ █████████████████████████ │ 109985 │ █████████████████████████ │
│ 2018-09-01 │ 104473929 │ █████████████████████████ │ 5011953 │ █████████████████████████ │ 109710 │ █████████████████████████ │
│ 2018-10-01 │ 112346556 │ █████████████████████████ │ 5320405 │ █████████████████████████ │ 112533 │ █████████████████████████ │
│ 2018-11-01 │ 112573001 │ █████████████████████████ │ 5353282 │ █████████████████████████ │ 112211 │ █████████████████████████ │
│ 2018-12-01 │ 121953600 │ █████████████████████████ │ 5611543 │ █████████████████████████ │ 118291 │ █████████████████████████ │
│ 2019-01-01 │ 129386587 │ █████████████████████████ │ 6016687 │ █████████████████████████ │ 125725 │ █████████████████████████ │
│ 2019-02-01 │ 120645639 │ █████████████████████████ │ 5974488 │ █████████████████████████ │ 125420 │ █████████████████████████ │
│ 2019-03-01 │ 137650471 │ █████████████████████████ │ 6410197 │ █████████████████████████ │ 135924 │ █████████████████████████ │
│ 2019-04-01 │ 138473643 │ █████████████████████████ │ 6416384 │ █████████████████████████ │ 139844 │ █████████████████████████ │
│ 2019-05-01 │ 142463421 │ █████████████████████████ │ 6574836 │ █████████████████████████ │ 142012 │ █████████████████████████ │
│ 2019-06-01 │ 134172939 │ █████████████████████████ │ 6601267 │ █████████████████████████ │ 140997 │ █████████████████████████ │
│ 2019-07-01 │ 145965083 │ █████████████████████████ │ 6901822 │ █████████████████████████ │ 147802 │ █████████████████████████ │
│ 2019-08-01 │ 146854393 │ █████████████████████████ │ 6993882 │ █████████████████████████ │ 151888 │ █████████████████████████ │
│ 2019-09-01 │ 137540219 │ █████████████████████████ │ 7001362 │ █████████████████████████ │ 148839 │ █████████████████████████ │
│ 2019-10-01 │ 129771456 │ █████████████████████████ │ 6825690 │ █████████████████████████ │ 144453 │ █████████████████████████ │
│ 2019-11-01 │ 107990259 │ █████████████████████████ │ 6368286 │ █████████████████████████ │ 141768 │ █████████████████████████ │
│ 2019-12-01 │ 112895934 │ █████████████████████████ │ 6640902 │ █████████████████████████ │ 148277 │ █████████████████████████ │
│ 2020-01-01 │ 54354879 │ █████████████████████████ │ 4782339 │ ███████████████████████▉ │ 111658 │ █████████████████████████ │
│ 2020-02-01 │ 22696923 │ ███████████▎ │ 3135175 │ ███████████████▋ │ 79521 │ ███████████████████▉ │
│ 2020-03-01 │ 3466677 │ █▋ │ 987960 │ ████▉ │ 40901 │ ██████████▏ │
└──────────────┴───────────┴───────────────────────────┴─────────┴───────────────────────────┴────────────┴───────────────────────────┘
172 rows in set. Elapsed: 184.809 sec. Processed 6.74 billion rows, 89.56 GB (36.47 million rows/s., 484.62 MB/s.)
```
11. Here are the top 10 subreddits of 2022:
```sql
SELECT
subreddit,
count() AS count
FROM reddit
WHERE toYear(created_utc) = 2022
GROUP BY subreddit
ORDER BY count DESC
LIMIT 10;
```
The response is:
```response
┌─subreddit────────┬───count─┐
│ AskReddit │ 3858203 │
│ politics │ 1356782 │
│ memes │ 1249120 │
│ nfl │ 883667 │
│ worldnews │ 866065 │
│ teenagers │ 777095 │
│ AmItheAsshole │ 752720 │
│ dankmemes │ 657932 │
│ nba │ 514184 │
│ unpopularopinion │ 473649 │
└──────────────────┴─────────┘
10 rows in set. Elapsed: 27.824 sec. Processed 6.74 billion rows, 53.26 GB (242.22 million rows/s., 1.91 GB/s.)
```
12. Let's see which subreddits had the biggest increase in comments from 2018 to 2019:
```sql
SELECT
subreddit,
newcount - oldcount AS diff
FROM
(
SELECT
subreddit,
count(*) AS newcount
FROM reddit
WHERE toYear(created_utc) = 2019
GROUP BY subreddit
)
ALL INNER JOIN
(
SELECT
subreddit,
count(*) AS oldcount
FROM reddit
WHERE toYear(created_utc) = 2018
GROUP BY subreddit
) USING (subreddit)
ORDER BY diff DESC
LIMIT 50
SETTINGS joined_subquery_requires_alias = 0;
```
It looks like memes and teenagers were busy on Reddit in 2019:
```response
┌─subreddit────────────┬─────diff─┐
│ memes │ 15368369 │
│ AskReddit │ 14663662 │
│ teenagers │ 12266991 │
│ AmItheAsshole │ 11561538 │
│ dankmemes │ 11305158 │
│ unpopularopinion │ 6332772 │
│ PewdiepieSubmissions │ 5930818 │
│ Market76 │ 5014668 │
│ relationship_advice │ 3776383 │
│ freefolk │ 3169236 │
│ Minecraft │ 3160241 │
│ classicwow │ 2907056 │
│ Animemes │ 2673398 │
│ gameofthrones │ 2402835 │
│ PublicFreakout │ 2267605 │
│ ShitPostCrusaders │ 2207266 │
│ RoastMe │ 2195715 │
│ gonewild │ 2148649 │
│ AnthemTheGame │ 1803818 │
│ entitledparents │ 1706270 │
│ MortalKombat │ 1679508 │
│ Cringetopia │ 1620555 │
│ pokemon │ 1615266 │
│ HistoryMemes │ 1608289 │
│ Brawlstars │ 1574977 │
│ iamatotalpieceofshit │ 1558315 │
│ trashy │ 1518549 │
│ ChapoTrapHouse │ 1505748 │
│ Pikabu │ 1501001 │
│ Showerthoughts │ 1475101 │
│ cursedcomments │ 1465607 │
│ ukpolitics │ 1386043 │
│ wallstreetbets │ 1384431 │
│ interestingasfuck │ 1378900 │
│ wholesomememes │ 1353333 │
│ AskOuija │ 1233263 │
│ borderlands3 │ 1197192 │
│ aww │ 1168257 │
│ insanepeoplefacebook │ 1155473 │
│ FortniteCompetitive │ 1122778 │
│ EpicSeven │ 1117380 │
│ FreeKarma4U │ 1116423 │
│ YangForPresidentHQ │ 1086700 │
│ SquaredCircle │ 1044089 │
│ MurderedByWords │ 1042511 │
│ AskMen │ 1024434 │
│ thedivision │ 1016634 │
│ barstoolsports │ 985032 │
│ nfl │ 978340 │
│ BattlefieldV │ 971408 │
└──────────────────────┴──────────┘
50 rows in set. Elapsed: 65.954 sec. Processed 13.48 billion rows, 79.67 GB (204.37 million rows/s., 1.21 GB/s.)
```
13. One more query: let's compare ClickHouse mentions to other technologies like Snowflake and Postgres. This query is a big one because it has to search all the comments for three different substrings, and unfortunately ClickHouse users are obviously not very active on Reddit yet:
```sql
SELECT
toStartOfQuarter(created_utc) AS quarter,
sum(if(positionCaseInsensitive(body, 'clickhouse') > 0, 1, 0)) AS clickhouse,
sum(if(positionCaseInsensitive(body, 'snowflake') > 0, 1, 0)) AS snowflake,
sum(if(positionCaseInsensitive(body, 'postgres') > 0, 1, 0)) AS postgres
FROM reddit
GROUP BY quarter
ORDER BY quarter ASC;
```
```response
┌────Quarter─┬─clickhouse─┬─snowflake─┬─postgres─┐
│ 2005-10-01 │ 0 │ 0 │ 0 │
│ 2006-01-01 │ 0 │ 2 │ 23 │
│ 2006-04-01 │ 0 │ 2 │ 24 │
│ 2006-07-01 │ 0 │ 4 │ 13 │
│ 2006-10-01 │ 0 │ 23 │ 73 │
│ 2007-01-01 │ 0 │ 14 │ 91 │
│ 2007-04-01 │ 0 │ 10 │ 59 │
│ 2007-07-01 │ 0 │ 39 │ 116 │
│ 2007-10-01 │ 0 │ 45 │ 125 │
│ 2008-01-01 │ 0 │ 53 │ 234 │
│ 2008-04-01 │ 0 │ 79 │ 303 │
│ 2008-07-01 │ 0 │ 102 │ 174 │
│ 2008-10-01 │ 0 │ 156 │ 323 │
│ 2009-01-01 │ 0 │ 206 │ 208 │
│ 2009-04-01 │ 0 │ 178 │ 417 │
│ 2009-07-01 │ 0 │ 300 │ 295 │
│ 2009-10-01 │ 0 │ 633 │ 589 │
│ 2010-01-01 │ 0 │ 555 │ 501 │
│ 2010-04-01 │ 0 │ 587 │ 469 │
│ 2010-07-01 │ 0 │ 770 │ 821 │
│ 2010-10-01 │ 0 │ 1480 │ 550 │
│ 2011-01-01 │ 0 │ 1482 │ 568 │
│ 2011-04-01 │ 0 │ 1558 │ 406 │
│ 2011-07-01 │ 0 │ 2163 │ 628 │
│ 2011-10-01 │ 0 │ 4064 │ 566 │
│ 2012-01-01 │ 0 │ 4621 │ 662 │
│ 2012-04-01 │ 0 │ 5737 │ 785 │
│ 2012-07-01 │ 0 │ 6097 │ 1127 │
│ 2012-10-01 │ 0 │ 7986 │ 600 │
│ 2013-01-01 │ 0 │ 9704 │ 839 │
│ 2013-04-01 │ 0 │ 8161 │ 853 │
│ 2013-07-01 │ 0 │ 9704 │ 1028 │
│ 2013-10-01 │ 0 │ 12879 │ 1404 │
│ 2014-01-01 │ 0 │ 12317 │ 1548 │
│ 2014-04-01 │ 0 │ 13181 │ 1577 │
│ 2014-07-01 │ 0 │ 15640 │ 1710 │
│ 2014-10-01 │ 0 │ 19479 │ 1959 │
│ 2015-01-01 │ 0 │ 20411 │ 2104 │
│ 2015-04-01 │ 1 │ 20309 │ 9112 │
│ 2015-07-01 │ 0 │ 20325 │ 4771 │
│ 2015-10-01 │ 0 │ 25087 │ 3030 │
│ 2016-01-01 │ 0 │ 23462 │ 3126 │
│ 2016-04-01 │ 3 │ 25496 │ 2757 │
│ 2016-07-01 │ 4 │ 28233 │ 2928 │
│ 2016-10-01 │ 2 │ 45445 │ 2449 │
│ 2017-01-01 │ 9 │ 76019 │ 2808 │
│ 2017-04-01 │ 9 │ 67919 │ 2803 │
│ 2017-07-01 │ 13 │ 68974 │ 2771 │
│ 2017-10-01 │ 12 │ 69730 │ 2906 │
│ 2018-01-01 │ 17 │ 67476 │ 3152 │
│ 2018-04-01 │ 3 │ 67139 │ 3986 │
│ 2018-07-01 │ 14 │ 67979 │ 3609 │
│ 2018-10-01 │ 28 │ 74147 │ 3850 │
│ 2019-01-01 │ 14 │ 80250 │ 4305 │
│ 2019-04-01 │ 30 │ 70307 │ 3872 │
│ 2019-07-01 │ 33 │ 77149 │ 4164 │
│ 2019-10-01 │ 13 │ 76746 │ 3541 │
│ 2020-01-01 │ 16 │ 54475 │ 846 │
└────────────┴────────────┴───────────┴──────────┘
58 rows in set. Elapsed: 2663.751 sec. Processed 6.74 billion rows, 1.21 TB (2.53 million rows/s., 454.37 MB/s.)
```

View File

@ -2,34 +2,115 @@
slug: /en/operations/named-collections
sidebar_position: 69
sidebar_label: "Named collections"
title: "Named collections"
---
# Storing details for connecting to external sources in configuration files
Named collections provide a way to store collections of key-value pairs to be
used to configure integrations with external sources. You can use named collections with
dictionaries, tables, table functions, and object storage.
Details for connecting to external sources (dictionaries, tables, table functions) can be saved
in configuration files and thus simplify the creation of objects and hide credentials
from users with only SQL access.
Named collections can be configured with DDL or in configuration files and are applied
when ClickHouse starts. They simplify the creation of objects and the hiding of credentials
from users without administrative access.
Parameters can be set in XML `<format>CSV</format>` and overridden in SQL `, format = 'TSV'`.
The parameters in SQL can be overridden using format `key` = `value`: `compression_method = 'gzip'`.
The keys in a named collection must match the parameter names of the corresponding
function, table engine, database, etc. In the examples below the parameter list is
linked to for each type.
Named collections are stored in the `config.xml` file of the ClickHouse server in the `<named_collections>` section and are applied when ClickHouse starts.
Parameters set in a named collection can be overridden in SQL, as shown in the examples below.
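For example, assuming a collection like `s3_mydata` below that sets `format = 'CSV'`, an individual query can still override that value (a minimal sketch re-using the file name from the S3 examples further down):
```sql
SELECT count()
FROM s3(s3_mydata, filename = 'test_file.tsv.gz', format = 'TSV');
```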
## Storing named collections in the system database
### DDL example
```sql
CREATE NAMED COLLECTION name AS
key_1 = 'value',
key_2 = 'value2',
url = 'https://connection.url/'
```
### Permissions to create named collections with DDL
To manage named collections with DDL a user must have the `named_collection_control` privilege. This can be assigned by adding a file to `/etc/clickhouse-server/users.d/`. The example gives the user `default` both the `access_management` and `named_collection_control` privileges:
```xml title='/etc/clickhouse-server/users.d/user_default.xml'
<clickhouse>
<users>
<default>
<password_sha256_hex replace=true>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
<access_management>1</access_management>
<!-- highlight-start -->
<named_collection_control>1</named_collection_control>
<!-- highlight-end -->
</default>
</users>
</clickhouse>
```
:::tip
In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` because the default configuration has a plain text `password` set, and it is not possible to have both plain text and SHA256 hex passwords set for a user.
:::
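If you need to produce such a hash, one option is to let ClickHouse compute it; a small sketch (substitute your own password):
```sql
SELECT lower(hex(SHA256('your_password'))) AS password_sha256_hex;
```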
## Storing named collections in configuration files
### XML example
```xml title='/etc/clickhouse-server/config.d/named_collections.xml'
<clickhouse>
<named_collections>
...
<name>
<key_1>value</key_1>
<key_2>value_2</key_2>
<url>https://connection.url/</url>
</name>
</named_collections>
</clickhouse>
```
## Named collections for accessing S3.
## Modifying named collections
Named collections that are created with DDL queries can be altered or dropped with DDL. Named collections created with XML files can be managed by editing or deleting the corresponding XML.
### Alter a DDL named collection
Change or add the keys `key1` and `key3` of the collection `collection2`:
```sql
ALTER NAMED COLLECTION collection2 SET key1=4, key3='value3'
```
Remove the key `key2` from `collection2`:
```sql
ALTER NAMED COLLECTION collection2 DELETE key2
```
Change or add the key `key1` and delete the key `key3` of the collection `collection2`:
```sql
ALTER NAMED COLLECTION collection2 SET key1=4, DELETE key3
```
### Drop the DDL named collection `collection2`:
```sql
DROP NAMED COLLECTION collection2
```
## Named collections for accessing S3
For the description of the parameters, see [s3 Table Function](../sql-reference/table-functions/s3.md).
Example of configuration:
### DDL example
```sql
CREATE NAMED COLLECTION s3_mydata AS
access_key_id = 'AKIAIOSFODNN7EXAMPLE',
secret_access_key = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
format = 'CSV',
url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/'
```
### XML example
```xml
<clickhouse>
<named_collections>
@ -43,23 +124,23 @@ Example of configuration:
</clickhouse>
```
### Example of using named collections with the s3 function
### s3() function and S3 Table named collection examples
Both of the following examples use the same named collection `s3_mydata`:
#### s3() function
```sql
INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz',
format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
SELECT * FROM numbers(10000);
SELECT count()
FROM s3(s3_mydata, filename = 'test_file.tsv.gz')
┌─count()─┐
│ 10000 │
└─────────┘
1 rows in set. Elapsed: 0.279 sec. Processed 10.00 thousand rows, 90.00 KB (35.78 thousand rows/s., 322.02 KB/s.)
```
### Example of using named collections with an S3 table
:::tip
The first argument to the `s3()` function above is the name of the collection, `s3_mydata`. Without named collections, the access key ID, secret, format, and URL would all be passed in every call to the `s3()` function.
:::
#### S3 table
```sql
CREATE TABLE s3_engine_table (number Int64)
@ -78,7 +159,22 @@ SELECT * FROM s3_engine_table LIMIT 3;
For the description of the parameters, see [mysql](../sql-reference/table-functions/mysql.md).
Example of configuration:
### DDL example
```sql
CREATE NAMED COLLECTION mymysql AS
user = 'myuser',
password = 'mypass',
host = '127.0.0.1',
port = 3306,
database = 'test',
connection_pool_size = 8,
on_duplicate_clause = 1,
replace_query = 1
```
### XML example
```xml
<clickhouse>
<named_collections>
@ -96,7 +192,11 @@ Example of configuration:
</clickhouse>
```
### Example of using named collections with the mysql function
### mysql() function, MySQL table, MySQL database, and Dictionary named collection examples
The four following examples use the same named collection `mymysql`:
#### mysql() function
```sql
SELECT count() FROM mysql(mymysql, table = 'test');
@ -105,8 +205,11 @@ SELECT count() FROM mysql(mymysql, table = 'test');
│ 3 │
└─────────┘
```
:::note
The named collection does not specify the `table` parameter, so it is specified in the function call as `table = 'test'`.
:::
### Example of using named collections with a MySQL table
#### MySQL table
```sql
CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0);
@ -117,7 +220,11 @@ SELECT count() FROM mytable;
└─────────┘
```
### Example of using named collections with database with engine MySQL
:::note
The DDL overrides the named collection setting for `connection_pool_size`.
:::
#### MySQL database
```sql
CREATE DATABASE mydatabase ENGINE = MySQL(mymysql);
@ -130,7 +237,7 @@ SHOW TABLES FROM mydatabase;
└────────┘
```
### Example of using named collections with a dictionary with source MySQL
#### MySQL Dictionary
```sql
CREATE DICTIONARY dict (A Int64, B String)
@ -150,6 +257,17 @@ SELECT dictGet('dict', 'B', 2);
For the description of the parameters, see [postgresql](../sql-reference/table-functions/postgresql.md).
```sql
CREATE NAMED COLLECTION mypg AS
user = 'pguser',
password = 'jw8s0F4',
host = '127.0.0.1',
port = 5432,
database = 'test',
schema = 'test_schema',
connection_pool_size = 8
```
Example of configuration:
```xml
<clickhouse>
@ -229,12 +347,22 @@ SELECT dictGet('dict', 'b', 2);
└─────────────────────────┘
```
## Named collections for accessing remote ClickHouse database
## Named collections for accessing a remote ClickHouse database
For the description of the parameters, see [remote](../sql-reference/table-functions/remote.md/#parameters).
Example of configuration:
```sql
CREATE NAMED COLLECTION remote1 AS
host = 'remote_host',
port = 9000,
database = 'system',
user = 'foo',
password = 'secret',
secure = 1
```
```xml
<clickhouse>
<named_collections>
@ -286,3 +414,4 @@ SELECT dictGet('dict', 'b', 1);
│ a │
└─────────────────────────┘
```

View File

@ -452,6 +452,8 @@ Possible values:
The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the bucket assigned to each row is recalculated. Any rows which don't belong to the current bucket are flushed and reassigned.
Supports `INNER/LEFT/RIGHT/FULL ALL/ANY JOIN`.
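A minimal sketch of opting into this algorithm for one query (the table and column names are made up for illustration):
```sql
SELECT l.id, r.value
FROM large_left AS l
INNER JOIN large_right AS r ON l.id = r.id
SETTINGS join_algorithm = 'grace_hash', grace_hash_join_initial_buckets = 16;
```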
- hash
[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.
@ -1377,6 +1379,12 @@ Possible values:
Default value: `default`.
## allow_experimental_parallel_reading_from_replicas
If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`). It will work for any kind of MergeTree table.
Default value: `false`.
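A sketch of enabling it for a session (the table name is illustrative; depending on the cluster setup, additional settings may be required):
```sql
SET allow_experimental_parallel_reading_from_replicas = 1;
SET max_parallel_replicas = 3;

-- The read below may be spread across up to 3 replicas of the table.
SELECT count() FROM my_replicated_mergetree_table;
```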
## compile_expressions {#compile-expressions}
Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime.
@ -1708,7 +1716,7 @@ Default value: `100000`.
### async_insert_max_query_number {#async-insert-max-query-number}
The maximum number of insert queries per block before being inserted. This setting takes effect only if [async_insert_deduplicate](#settings-async-insert-deduplicate) is enabled.
The maximum number of insert queries per block before being inserted. This setting takes effect only if [async_insert_deduplicate](#async-insert-deduplicate) is enabled.
Possible values:
@ -1739,7 +1747,7 @@ Possible values:
Default value: `0`.
### async_insert_deduplicate {#settings-async-insert-deduplicate}
### async_insert_deduplicate {#async-insert-deduplicate}
Enables or disables insert deduplication of `ASYNC INSERT` (for Replicated\* tables).
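A minimal sketch of an asynchronous, deduplicated insert (the table is assumed to be a Replicated\* table; its name and columns are made up):
```sql
INSERT INTO replicated_events (id, message)
SETTINGS async_insert = 1, wait_for_async_insert = 1, async_insert_deduplicate = 1
VALUES (1, 'first event');
```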
@ -3213,17 +3221,6 @@ Possible values:
Default value: `0`.
## allow_experimental_geo_types {#allow-experimental-geo-types}
Allows working with experimental [geo data types](../../sql-reference/data-types/geo.md).
Possible values:
- 0 — Working with geo data types is disabled.
- 1 — Working with geo data types is enabled.
Default value: `0`.
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.

View File

@ -97,8 +97,8 @@ Columns:
- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
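As an illustration of how the two `Map` columns are read, a sketch that pulls one profile event and one changed setting for recent queries (the other columns used here are standard `system.query_log` columns):
```sql
SELECT
    event_time,
    query,
    ProfileEvents['SelectedRows'] AS selected_rows,
    Settings['max_threads'] AS max_threads
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 5;
```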

View File

@ -0,0 +1,29 @@
---
slug: /en/operations/system-tables/zookeeper_connection
---
# zookeeper_connection
This table does not exist if ZooKeeper is not configured. The `system.zookeeper_connection` table shows current connections to ZooKeeper (including auxiliary ZooKeepers). Each row shows information about one connection.
Columns:
- `name` ([String](../../sql-reference/data-types/string.md)) — ZooKeeper cluster's name.
- `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
- `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
- `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established.
- `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
- `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
- `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.
Example:
``` sql
SELECT * FROM system.zookeeper_connection;
```
``` text
┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
│ default_zookeeper │ 127.0.0.1 │ 2181 │ 0 │ 2023-05-19 14:30:16 │ 0 │ 0 │ 216349144108826660 │
└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
```

View File

@ -29,5 +29,5 @@ ClickHouse data types include:
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column getting its default value for the data type)
- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses
- **Geo types**: for[ geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
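As a small illustration, a sketch of a table definition that combines several of the types above (the table and column names are made up):
```sql
CREATE TABLE datatype_demo
(
    id UInt32,
    client_ip Nullable(IPv4),
    location Point,
    tags Array(String),
    dimensions Tuple(width UInt16, height UInt16)
)
ENGINE = MergeTree
ORDER BY id;
```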

View File

@ -267,14 +267,16 @@ or
LAYOUT(HASHED())
```
If `shards` is greater than 1 (default is `1`), the dictionary will load data in parallel, which is useful if you have a huge number of elements in one dictionary.
Configuration example:
``` xml
<layout>
<hashed>
<!-- If shards greater then 1 (default is `1`) the dictionary will load
data in parallel, useful if you have huge amount of elements in one
dictionary. -->
<shards>10</shards>
<!-- Size of the backlog for blocks in parallel queue.
Since the bottleneck in parallel loading is rehash, and so to avoid
@ -284,6 +286,14 @@ Configuration example:
10000 is a good balance between memory and speed.
Even for 10e10 elements it can handle all the load without starvation. -->
<shard_load_queue_backlog>10000</shard_load_queue_backlog>
<!-- Maximum load factor of the hash table. With greater values the memory
is utilized more efficiently (less memory is wasted), but read performance
may deteriorate.
Valid values: [0.5, 0.99]
Default: 0.5 -->
<max_load_factor>0.5</max_load_factor>
</hashed>
</layout>
```
@ -291,7 +301,7 @@ Configuration example:
or
``` sql
LAYOUT(HASHED(SHARDS 10 [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```
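For reference, a fully spelled-out variant of the same layout (a sketch only; the values are illustrative and must respect the ranges documented above):

``` sql
LAYOUT(HASHED(SHARDS 10 SHARD_LOAD_QUEUE_BACKLOG 10000 MAX_LOAD_FACTOR 0.75))
```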
### sparse_hashed
@ -304,14 +314,18 @@ Configuration example:
``` xml
<layout>
<sparse_hashed />
<sparse_hashed>
<!-- <shards>1</shards> -->
<!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
<!-- <max_load_factor>0.5</max_load_factor> -->
</sparse_hashed>
</layout>
```
or
``` sql
LAYOUT(SPARSE_HASHED())
LAYOUT(SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```
It is also possible to use `shards` for this type of dictionary, and again it is more important for `sparse_hashed` than for `hashed`, since `sparse_hashed` is slower.
@ -325,8 +339,9 @@ Configuration example:
``` xml
<layout>
<complex_key_hashed>
<shards>1</shards>
<!-- <shards>1</shards> -->
<!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
<!-- <max_load_factor>0.5</max_load_factor> -->
</complex_key_hashed>
</layout>
```
@ -334,7 +349,7 @@ Configuration example:
or
``` sql
LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```
### complex_key_sparse_hashed
@ -346,7 +361,9 @@ Configuration example:
``` xml
<layout>
<complex_key_sparse_hashed>
<shards>1</shards>
<!-- <shards>1</shards> -->
<!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
<!-- <max_load_factor>0.5</max_load_factor> -->
</complex_key_sparse_hashed>
</layout>
```
@ -354,7 +371,7 @@ Configuration example:
or
``` sql
LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```
### hashed_array
@ -2197,16 +2214,16 @@ Result:
└─────────────────────────────────┴───────┘
```
## RegExp Tree Dictionary {#regexp-tree-dictionary}
## Regular Expression Tree Dictionary {#regexp-tree-dictionary}
Regexp Tree dictionary stores multiple trees of regular expressions with attributions. Users can retrieve strings in the dictionary. If a string matches the root of the regexp tree, we will collect the corresponding attributes of the matched root and continue to walk the children. If any of the children matches the string, we will collect attributes and rewrite the old ones if conflicts occur, then continue the traverse until we reach leaf nodes.
Regular expression tree dictionaries are a special type of dictionary which represent the mapping from key to attributes using a tree of regular expressions. There are some use cases, e.g. parsing of (user agent)[https://en.wikipedia.org/wiki/User_agent] strings, which can be expressed elegantly with regexp tree dictionaries.
Example of the ddl query for creating Regexp Tree dictionary:
### Use Regular Expression Tree Dictionary in ClickHouse Open-Source
<CloudDetails />
Regular expression tree dictionaries are defined in ClickHouse open-source using the YAMLRegExpTree source, which is provided with the path to a YAML file containing the regular expression tree.
```sql
create dictionary regexp_dict
CREATE DICTIONARY regexp_dict
(
regexp String,
name String,
@ -2218,17 +2235,15 @@ LAYOUT(regexp_tree)
...
```
**Source**
The dictionary source `YAMLRegExpTree` represents the structure of a regexp tree. For example:
We introduce a type of source called `YAMLRegExpTree` representing the structure of Regexp Tree dictionary. An Example of a valid yaml config is like:
```xml
```yaml
- regexp: 'Linux/(\d+[\.\d]*).+tlinux'
name: 'TencentOS'
version: '\1'
- regexp: '\d+/tclwebkit(?:\d+[\.\d]*)'
name: 'Andriod'
name: 'Android'
versions:
- regexp: '33/tclwebkit'
version: '13'
@ -2240,17 +2255,14 @@ We introduce a type of source called `YAMLRegExpTree` representing the structure
version: '10'
```
The key `regexp` represents the regular expression of a tree node. The name of key is same as the dictionary key. The `name` and `version` is user-defined attributions in the dicitionary. The `versions` (which can be any name that not appear in attributions or the key) indicates the children nodes of this tree.
This config consists of a list of regular expression tree nodes. Each node has the following structure:
**Back Reference**
- **regexp**: the regular expression of the node.
- **attributes**: a list of user-defined dictionary attributes. In this example, there are two attributes: `name` and `version`. The first node defines both attributes. The second node only defines attribute `name`. Attribute `version` is provided by the child nodes of the second node.
- The value of an attribute may contain **back references**, referring to capture groups of the matched regular expression. In the example, the value of attribute `version` in the first node consists of a back-reference `\1` to capture group `(\d+[\.\d]*)` in the regular expression. Back-reference numbers range from 1 to 9 and are written as `$1` or `\1` (for number 1). The back reference is replaced by the matched capture group during query execution.
- **child nodes**: a list of children of a regexp tree node, each of which has its own attributes and (potentially) child nodes. String matching proceeds in a depth-first fashion. If a string matches a regexp node, the dictionary checks if it also matches the node's child nodes. If that is the case, the attributes of the deepest matching node are assigned. Attributes of a child node overwrite equally named attributes of parent nodes. The name of child nodes in YAML files can be arbitrary, e.g. `versions` in the above example.
The value of an attribution could contain a back reference which refers to a capture group of the matched regular expression. Reference number ranges from 1 to 9 and writes as `$1` or `\1`.
During the query execution, the back reference in the value will be replaced by the matched capture group.
**Query**
Due to the specialty of Regexp Tree dictionary, we only allow functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull` work with it.
Regexp tree dictionaries only allow access using functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull`.
Example:
@ -2260,12 +2272,83 @@ SELECT dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024');
Result:
```
```text
┌─dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024')─┐
│ ('Andriod','12') │
│ ('Android','12') │
└─────────────────────────────────────────────────────────────────┘
```
In this case, we first match the regular expression `\d+/tclwebkit(?:\d+[\.\d]*)` in the top layer's second node. The dictionary then continues to look into the child nodes and finds that the string also matches `3[12]/tclwebkit`. As a result, the value of attribute `name` is `Android` (defined in the first layer) and the value of attribute `version` is `12` (defined in the child node).
With a powerful YAML configuration file, we can use regexp tree dictionaries as a user agent string parser. We support [uap-core](https://github.com/ua-parser/uap-core) and demonstrate how to use it in the functional test [02504_regexp_dictionary_ua_parser](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh).
### Use Regular Expression Tree Dictionary in ClickHouse Cloud
The `YAMLRegExpTree` source used above works in ClickHouse Open Source but not in ClickHouse Cloud. To use regexp tree dictionaries in ClickHouse Cloud, first create a regexp tree dictionary from a YAML file locally in ClickHouse Open Source, then dump this dictionary into a CSV file using the `dictionary` table function and the [INTO OUTFILE](../statements/select/into-outfile.md) clause.
```sql
SELECT * FROM dictionary(regexp_dict) INTO OUTFILE('regexp_dict.csv')
```
The content of the CSV file is:
```text
1,0,"Linux/(\d+[\.\d]*).+tlinux","['version','name']","['\\1','TencentOS']"
2,0,"(\d+)/tclwebkit(\d+[\.\d]*)","['comment','version','name']","['test $1 and $2','$1','Android']"
3,2,"33/tclwebkit","['version']","['13']"
4,2,"3[12]/tclwebkit","['version']","['12']"
5,2,"3[12]/tclwebkit","['version']","['11']"
6,2,"3[12]/tclwebkit","['version']","['10']"
```
The schema of the dumped file is:
- `id UInt64`: the id of the RegexpTree node.
- `parent_id UInt64`: the id of the parent of a node.
- `regexp String`: the regular expression string.
- `keys Array(String)`: the names of user-defined attributes.
- `values Array(String)`: the values of user-defined attributes.
To create the dictionary in ClickHouse Cloud, first create a table `regexp_dictionary_source_table` with the following structure:
```sql
CREATE TABLE regexp_dictionary_source_table
(
id UInt64,
parent_id UInt64,
regexp String,
keys Array(String),
values Array(String)
) ENGINE=Memory;
```
Then insert the local CSV file into the source table:
```bash
clickhouse client \
--host MY_HOST \
--secure \
--password MY_PASSWORD \
--query "
INSERT INTO regexp_dictionary_source_table
SELECT * FROM input ('id UInt64, parent_id UInt64, regexp String, keys Array(String), values Array(String)')
FORMAT CSV" < regexp_dict.csv
```
See [Insert Local Files](https://clickhouse.com/docs/en/integrations/data-ingestion/insert-local-files) for more details. After initializing the source table, we can create a regexp tree dictionary from the table source:
``` sql
CREATE DICTIONARY regexp_dict
(
regexp String,
name String,
version String
)
PRIMARY KEY(regexp)
SOURCE(CLICKHOUSE(TABLE 'regexp_dictionary_source_table'))
LIFETIME(0)
LAYOUT(regexp_tree);
```
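Once created, the dictionary can be queried exactly as in the open-source example above. A minimal sketch, assuming the same CSV data shown earlier was inserted into `regexp_dictionary_source_table`:

``` sql
SELECT dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024');
```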
## Embedded Dictionaries {#embedded-dictionaries}
<SelfManaged />

View File

@ -284,13 +284,17 @@ Manipulates data in the specifies partition matching the specified filtering exp
Syntax:
``` sql
ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] [IN PARTITION partition_id] WHERE filter_expr
ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] [IN PARTITION partition_expr] WHERE filter_expr
```
### Example
``` sql
-- using partition name
ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2;
-- using partition id
ALTER TABLE mt UPDATE x = x + 1 IN PARTITION ID '2' WHERE p = 2;
```
### See Also
@ -304,13 +308,17 @@ Deletes data in the specifies partition matching the specified filtering express
Syntax:
``` sql
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE [IN PARTITION partition_id] WHERE filter_expr
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE [IN PARTITION partition_expr] WHERE filter_expr
```
### Example
``` sql
-- using partition name
ALTER TABLE mt DELETE IN PARTITION 2 WHERE p = 2;
-- using partition id
ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;
```
### See Also

View File

@ -9,7 +9,7 @@ sidebar_label: GRANT
- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
- Assigns roles to user accounts or to the other roles.
To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants-statement) statement.
To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants) statement.
## Granting Privilege Syntax

View File

@ -3185,16 +3185,6 @@ SELECT * FROM test2;
Default value: `0`.
## allow_experimental_geo_types {#allow-experimental-geo-types}
Allows using experimental data types for working with [geographical structures](../../sql-reference/data-types/geo.md).
Possible values:
- 0 — data types for working with geographical structures are not supported.
- 1 — data types for working with geographical structures are supported.
Default value: `0`.
## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}
Adds the `SYNC` modifier to all `DROP` and `DETACH` queries.

View File

@ -8,13 +8,8 @@ sidebar_label: Географические структуры
ClickHouse supports data types for representing geographical objects: points (locations), areas, and so on.
:::danger "Warning"
Currently, working with geographical data types is an experimental feature. To use these data types, enable the setting `allow_experimental_geo_types = 1`.
:::
**See Also**
- [Storing geographical data structures](https://ru.wikipedia.org/wiki/GeoJSON).
- The [allow_experimental_geo_types](../../operations/settings/settings.md#allow-experimental-geo-types) setting.
## Point {#point-data-type}
@ -25,7 +20,6 @@ ClickHouse поддерживает типы данных для отображ
Query:
```sql
SET allow_experimental_geo_types = 1;
CREATE TABLE geo_point (p Point) ENGINE = Memory();
INSERT INTO geo_point VALUES((10, 10));
SELECT p, toTypeName(p) FROM geo_point;
@ -47,7 +41,6 @@ SELECT p, toTypeName(p) FROM geo_point;
Query:
```sql
SET allow_experimental_geo_types = 1;
CREATE TABLE geo_ring (r Ring) ENGINE = Memory();
INSERT INTO geo_ring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
SELECT r, toTypeName(r) FROM geo_ring;
@ -69,7 +62,6 @@ SELECT r, toTypeName(r) FROM geo_ring;
A row in this table describes a polygon with one hole:
```sql
SET allow_experimental_geo_types = 1;
CREATE TABLE geo_polygon (pg Polygon) ENGINE = Memory();
INSERT INTO geo_polygon VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]);
SELECT pg, toTypeName(pg) FROM geo_polygon;
@ -92,7 +84,6 @@ SELECT pg, toTypeName(pg) FROM geo_polygon;
A row in this table describes an element consisting of two polygons: the first without holes, and the second with one hole:
```sql
SET allow_experimental_geo_types = 1;
CREATE TABLE geo_multipolygon (mpg MultiPolygon) ENGINE = Memory();
INSERT INTO geo_multipolygon VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]);
SELECT mpg, toTypeName(mpg) FROM geo_multipolygon;

View File

@ -55,7 +55,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION
Likewise, `john` has the `GRANT OPTION` privilege, so he can grant other accounts privileges within the same scope as his own.
You can use the `*` character instead of a table or database name when granting. For example, `GRANT SELECT ONdb.* TO john` allows `john` to run `SELECT` queries on all tables in the `db` database. You can also omit the database name; in that case the privileges apply to the current database. For example, `GRANT SELECT ON* to john` grants on all tables of the current database, and `GARNT SELECT ON mytable to john` grants on the `mytable` table of the current database.
You can use the `*` character instead of a table or database name when granting. For example, `GRANT SELECT ONdb.* TO john` allows `john` to run `SELECT` queries on all tables in the `db` database. You can also omit the database name; in that case the privileges apply to the current database. For example, `GRANT SELECT ON* to john` grants on all tables of the current database, and `GRANT SELECT ON mytable to john` grants on the `mytable` table of the current database.
Access to the `systen` database is always allowed, since this database is used for processing SQL operations.
Multiple kinds of privileges can be granted to multiple accounts at once. `GRANT SELECT,INSERT ON *.* TO john,robin` allows the `john` and `robin` accounts to run `INSERT` and `SELECT` operations on any table of any database.

View File

@ -4,10 +4,10 @@ if (NOT(
AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR
)
)
set (COMPRESSOR "${CMAKE_BINARY_DIR}/native/utils/self-extracting-executable/pre_compressor")
set (DECOMPRESSOR "--decompressor=${CMAKE_BINARY_DIR}/utils/self-extracting-executable/decompressor")
set (COMPRESSOR "${PROJECT_BINARY_DIR}/native/utils/self-extracting-executable/pre_compressor")
set (DECOMPRESSOR "--decompressor=${PROJECT_BINARY_DIR}/utils/self-extracting-executable/decompressor")
else ()
set (COMPRESSOR "${CMAKE_BINARY_DIR}/utils/self-extracting-executable/compressor")
set (COMPRESSOR "${PROJECT_BINARY_DIR}/utils/self-extracting-executable/compressor")
endif ()
add_custom_target (self-extracting ALL

View File

@ -719,8 +719,12 @@
<!-- Default profile of settings. -->
<default_profile>default</default_profile>
<!-- Comma-separated list of prefixes for user-defined settings. -->
<custom_settings_prefixes></custom_settings_prefixes>
<!-- Comma-separated list of prefixes for user-defined settings.
The server will allow to set these settings, and retrieve them with the getSetting function.
They are also logged in the query_log, similarly to other settings, but have no special effect.
The "SQL_" prefix is introduced for compatibility with MySQL - these settings are being set be Tableau.
-->
<custom_settings_prefixes>SQL_</custom_settings_prefixes>
<!-- System profile of settings. This settings are used by internal processes (Distributed DDL worker and so on). -->
<!-- <system_profile>default</system_profile> -->
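As a quick sketch of what the `custom_settings_prefixes` change above enables (the setting name `SQL_some_setting` is hypothetical; any name under an allowed prefix works the same way):

``` sql
SET SQL_some_setting = 'hello';
SELECT getSetting('SQL_some_setting');
```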

View File

@ -121,7 +121,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample(
void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
{
AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = true };
AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };
factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });

View File

@ -72,7 +72,7 @@ public:
{
/// Currently the only functions that return not-NULL on all NULL arguments are count and uniq, and they return UInt64.
if (properties.returns_default_when_only_null)
return std::make_shared<AggregateFunctionNothing>(arguments, params, nested_function->getResultType());
return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeUInt64>());
else
return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()));
}

View File

@ -209,15 +209,20 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const
function_ast->kind = ASTFunction::Kind::WINDOW_FUNCTION;
}
auto new_options = options;
/// To avoid surrounding constants with several internal casts.
if (function_name == "_CAST" && (*getArguments().begin())->getNodeType() == QueryTreeNodeType::CONSTANT)
new_options.add_cast_for_constants = false;
const auto & parameters = getParameters();
if (!parameters.getNodes().empty())
{
function_ast->children.push_back(parameters.toAST(options));
function_ast->children.push_back(parameters.toAST(new_options));
function_ast->parameters = function_ast->children.back();
}
const auto & arguments = getArguments();
function_ast->children.push_back(arguments.toAST(options));
function_ast->children.push_back(arguments.toAST(new_options));
function_ast->arguments = function_ast->children.back();
auto window_node = getWindowNode();
@ -226,7 +231,7 @@ ASTPtr FunctionNode::toASTImpl(const ConvertToASTOptions & options) const
if (auto * identifier_node = window_node->as<IdentifierNode>())
function_ast->window_name = identifier_node->getIdentifier().getFullName();
else
function_ast->window_definition = window_node->toAST(options);
function_ast->window_definition = window_node->toAST(new_options);
}
return function_ast;

View File

@ -38,6 +38,9 @@ public:
if (!query->hasGroupBy())
return;
if (query->isGroupByWithCube() || query->isGroupByWithRollup())
return;
auto & group_by = query->getGroupBy().getNodes();
if (query->isGroupByWithGroupingSets())
{

View File

@ -115,6 +115,7 @@ namespace
writeBinary(info.checksum, out);
writeBinary(info.base_size, out);
writeBinary(info.base_checksum, out);
writeBinary(info.encrypted_by_disk, out);
/// We don't store `info.data_file_name` and `info.data_file_index` because they're determined automatically
/// after reading file infos for all the hosts (see the class BackupCoordinationFileInfos).
}
@ -136,6 +137,7 @@ namespace
readBinary(info.checksum, in);
readBinary(info.base_size, in);
readBinary(info.base_checksum, in);
readBinary(info.encrypted_by_disk, in);
}
return res;
}
@ -254,7 +256,10 @@ void BackupCoordinationRemote::removeAllNodes()
void BackupCoordinationRemote::setStage(const String & new_stage, const String & message)
{
stage_sync->set(current_host, new_stage, message);
if (is_internal)
stage_sync->set(current_host, new_stage, message);
else
stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true);
}
void BackupCoordinationRemote::setError(const Exception & exception)
@ -777,8 +782,8 @@ bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &)
String status;
if (zk->tryGet(root_zookeeper_path + "/" + existing_backup_path + "/stage", status))
{
/// If status is not COMPLETED it could be because the backup failed, check if 'error' exists
if (status != Stage::COMPLETED && !zk->exists(root_zookeeper_path + "/" + existing_backup_path + "/error"))
/// Check if some other backup is in progress
if (status == Stage::SCHEDULED_TO_START)
{
LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid));
result = true;

View File

@ -43,6 +43,10 @@ namespace BackupCoordinationStage
/// Coordination stage meaning that a host finished its work.
constexpr const char * COMPLETED = "completed";
/// Coordination stage meaning that backup/restore has failed due to an error
/// Check '/error' for the error message
constexpr const char * ERROR = "error";
}
}

View File

@ -8,11 +8,13 @@
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <Backups/BackupCoordinationStage.h>
namespace DB
{
namespace Stage = BackupCoordinationStage;
namespace ErrorCodes
{
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
@ -42,7 +44,7 @@ void BackupCoordinationStageSync::createRootNodes()
});
}
void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message)
void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts)
{
auto holder = with_retries.createRetriesControlHolder("set");
holder.retries_ctl.retryLoop(
@ -50,14 +52,23 @@ void BackupCoordinationStageSync::set(const String & current_host, const String
{
with_retries.renewZooKeeper(zookeeper);
/// Make an ephemeral node so the initiator can track if the current host is still working.
String alive_node_path = zookeeper_path + "/alive|" + current_host;
auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
throw zkutil::KeeperException(code, alive_node_path);
if (all_hosts)
{
auto code = zookeeper->trySet(zookeeper_path, new_stage);
if (code != Coordination::Error::ZOK)
throw zkutil::KeeperException(code, zookeeper_path);
}
else
{
/// Make an ephemeral node so the initiator can track if the current host is still working.
String alive_node_path = zookeeper_path + "/alive|" + current_host;
auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
throw zkutil::KeeperException(code, alive_node_path);
zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message);
zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message);
}
});
}
@ -73,6 +84,10 @@ void BackupCoordinationStageSync::setError(const String & current_host, const Ex
writeStringBinary(current_host, buf);
writeException(exception, buf, true);
zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
if (code != Coordination::Error::ZOK)
throw zkutil::KeeperException(code, zookeeper_path);
});
}

View File

@ -15,7 +15,7 @@ public:
Poco::Logger * log_);
/// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that.
void set(const String & current_host, const String & new_stage, const String & message);
void set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts = false);
void setError(const String & current_host, const Exception & exception);
/// Sets the stage of the current host and waits until all hosts come to the same stage.

View File

@ -1,26 +1,45 @@
#include <Backups/BackupEntryFromAppendOnlyFile.h>
#include <Disks/IDisk.h>
#include <IO/LimitSeekableReadBuffer.h>
namespace DB
{
namespace
{
/// For append-only files we must calculate the size at the construction of the backup entry.
UInt64 calculateSize(const DiskPtr & disk, const String & file_path, bool copy_encrypted, std::optional<UInt64> unencrypted_file_size)
{
if (!unencrypted_file_size)
return copy_encrypted ? disk->getEncryptedFileSize(file_path) : disk->getFileSize(file_path);
else if (copy_encrypted)
return disk->getEncryptedFileSize(*unencrypted_file_size);
else
return *unencrypted_file_size;
}
}
BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile(
const DiskPtr & disk_,
const String & file_path_,
const ReadSettings & settings_,
const std::optional<UInt64> & file_size_,
const std::optional<UInt128> & checksum_,
const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_)
: BackupEntryFromImmutableFile(disk_, file_path_, settings_, file_size_, checksum_, temporary_file_)
, limit(BackupEntryFromImmutableFile::getSize())
const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_, const std::optional<UInt64> & file_size_)
: disk(disk_)
, file_path(file_path_)
, data_source_description(disk->getDataSourceDescription())
, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
, size(calculateSize(disk_, file_path_, copy_encrypted, file_size_))
{
}
std::unique_ptr<SeekableReadBuffer> BackupEntryFromAppendOnlyFile::getReadBuffer() const
BackupEntryFromAppendOnlyFile::~BackupEntryFromAppendOnlyFile() = default;
std::unique_ptr<SeekableReadBuffer> BackupEntryFromAppendOnlyFile::getReadBuffer(const ReadSettings & read_settings) const
{
auto buf = BackupEntryFromImmutableFile::getReadBuffer();
return std::make_unique<LimitSeekableReadBuffer>(std::move(buf), 0, limit);
std::unique_ptr<SeekableReadBuffer> buf;
if (copy_encrypted)
buf = disk->readEncryptedFile(file_path, read_settings.adjustBufferSize(size));
else
buf = disk->readFile(file_path, read_settings.adjustBufferSize(size));
return std::make_unique<LimitSeekableReadBuffer>(std::move(buf), 0, size);
}
}

View File

@ -1,6 +1,6 @@
#pragma once
#include <Backups/BackupEntryFromImmutableFile.h>
#include <Backups/BackupEntryWithChecksumCalculation.h>
namespace DB
@ -8,24 +8,34 @@ namespace DB
/// Represents a file prepared to be included in a backup, assuming that until this backup entry is destroyed
/// the file can be appended with new data, but the bytes which are already in the file won't be changed.
class BackupEntryFromAppendOnlyFile : public BackupEntryFromImmutableFile
class BackupEntryFromAppendOnlyFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
{
public:
/// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
/// The constructor is allowed to not set `file_size_`, in that case it will be calculated from the data.
BackupEntryFromAppendOnlyFile(
const DiskPtr & disk_,
const String & file_path_,
const ReadSettings & settings_,
const std::optional<UInt64> & file_size_ = {},
const std::optional<UInt128> & checksum_ = {},
const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_ = {});
bool copy_encrypted_ = false,
const std::optional<UInt64> & file_size_ = {});
UInt64 getSize() const override { return limit; }
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
~BackupEntryFromAppendOnlyFile() override;
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override;
UInt64 getSize() const override { return size; }
DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
bool isEncryptedByDisk() const override { return copy_encrypted; }
bool isFromFile() const override { return true; }
DiskPtr getDisk() const override { return disk; }
String getFilePath() const override { return file_path; }
private:
const UInt64 limit;
const DiskPtr disk;
const String file_path;
const DataSourceDescription data_source_description;
const bool copy_encrypted;
const UInt64 size;
};
}

View File

@ -1,53 +1,84 @@
#include <Backups/BackupEntryFromImmutableFile.h>
#include <Disks/IDisk.h>
#include <Disks/IO/createReadBufferFromFileBase.h>
#include <Poco/File.h>
#include <Common/filesystemHelpers.h>
namespace DB
{
namespace
{
/// We mix the checksum calculated for non-encrypted data with the IV generated to encrypt the file
/// to generate a kind of checksum for encrypted data. Of course it differs from the CityHash properly calculated for the encrypted data.
UInt128 combineChecksums(UInt128 checksum1, UInt128 checksum2)
{
chassert(std::size(checksum2.items) == 2);
return CityHash_v1_0_2::CityHash128WithSeed(reinterpret_cast<const char *>(&checksum1), sizeof(checksum1), {checksum2.items[0], checksum2.items[1]});
}
}
BackupEntryFromImmutableFile::BackupEntryFromImmutableFile(
const DiskPtr & disk_,
const String & file_path_,
const ReadSettings & settings_,
bool copy_encrypted_,
const std::optional<UInt64> & file_size_,
const std::optional<UInt128> & checksum_,
const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_)
const std::optional<UInt128> & checksum_)
: disk(disk_)
, file_path(file_path_)
, settings(settings_)
, data_source_description(disk->getDataSourceDescription())
, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
, file_size(file_size_)
, checksum(checksum_)
, temporary_file_on_disk(temporary_file_)
{
}
BackupEntryFromImmutableFile::~BackupEntryFromImmutableFile() = default;
std::unique_ptr<SeekableReadBuffer> BackupEntryFromImmutableFile::getReadBuffer(const ReadSettings & read_settings) const
{
if (copy_encrypted)
return disk->readEncryptedFile(file_path, read_settings);
else
return disk->readFile(file_path, read_settings);
}
UInt64 BackupEntryFromImmutableFile::getSize() const
{
std::lock_guard lock{get_file_size_mutex};
if (!file_size)
file_size = disk->getFileSize(file_path);
std::lock_guard lock{size_and_checksum_mutex};
if (!file_size_adjusted)
{
if (!file_size)
file_size = copy_encrypted ? disk->getEncryptedFileSize(file_path) : disk->getFileSize(file_path);
else if (copy_encrypted)
file_size = disk->getEncryptedFileSize(*file_size);
file_size_adjusted = true;
}
return *file_size;
}
std::unique_ptr<SeekableReadBuffer> BackupEntryFromImmutableFile::getReadBuffer() const
UInt128 BackupEntryFromImmutableFile::getChecksum() const
{
return disk->readFile(file_path, settings);
std::lock_guard lock{size_and_checksum_mutex};
if (!checksum_adjusted)
{
if (!checksum)
checksum = BackupEntryWithChecksumCalculation<IBackupEntry>::getChecksum();
else if (copy_encrypted)
checksum = combineChecksums(*checksum, disk->getEncryptedFileIV(file_path));
checksum_adjusted = true;
}
return *checksum;
}
DataSourceDescription BackupEntryFromImmutableFile::getDataSourceDescription() const
std::optional<UInt128> BackupEntryFromImmutableFile::getPartialChecksum(size_t prefix_length) const
{
return disk->getDataSourceDescription();
}
if (prefix_length == 0)
return 0;
String BackupEntryFromImmutableFile::getFilePath() const
{
return file_path;
if (prefix_length >= getSize())
return getChecksum();
/// For immutable files we don't use partial checksums.
return std::nullopt;
}
}

View File

@ -1,49 +1,53 @@
#pragma once
#include <Backups/IBackupEntry.h>
#include <IO/ReadSettings.h>
#include <Backups/BackupEntryWithChecksumCalculation.h>
#include <base/defines.h>
#include <mutex>
namespace DB
{
class TemporaryFileOnDisk;
class IDisk;
using DiskPtr = std::shared_ptr<IDisk>;
/// Represents a file prepared to be included in a backup, assuming that until this backup entry is destroyed the file won't be changed.
class BackupEntryFromImmutableFile : public IBackupEntry
class BackupEntryFromImmutableFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
{
public:
/// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
BackupEntryFromImmutableFile(
const DiskPtr & disk_,
const String & file_path_,
const ReadSettings & settings_,
bool copy_encrypted_ = false,
const std::optional<UInt64> & file_size_ = {},
const std::optional<UInt128> & checksum_ = {},
const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_ = {});
const std::optional<UInt128> & checksum_ = {});
~BackupEntryFromImmutableFile() override;
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override;
UInt64 getSize() const override;
std::optional<UInt128> getChecksum() const override { return checksum; }
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
UInt128 getChecksum() const override;
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override;
String getFilePath() const override;
DataSourceDescription getDataSourceDescription() const override;
DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
bool isEncryptedByDisk() const override { return copy_encrypted; }
DiskPtr tryGetDiskIfExists() const override { return disk; }
bool isFromFile() const override { return true; }
bool isFromImmutableFile() const override { return true; }
DiskPtr getDisk() const override { return disk; }
String getFilePath() const override { return file_path; }
private:
const DiskPtr disk;
const String file_path;
ReadSettings settings;
mutable std::optional<UInt64> file_size TSA_GUARDED_BY(get_file_size_mutex);
mutable std::mutex get_file_size_mutex;
const std::optional<UInt128> checksum;
const std::shared_ptr<TemporaryFileOnDisk> temporary_file_on_disk;
const DataSourceDescription data_source_description;
const bool copy_encrypted;
mutable std::optional<UInt64> file_size;
mutable std::optional<UInt128> checksum;
mutable bool file_size_adjusted = false;
mutable bool checksum_adjusted = false;
mutable std::mutex size_and_checksum_mutex;
};
}

View File

@ -5,17 +5,16 @@
namespace DB
{
BackupEntryFromMemory::BackupEntryFromMemory(const void * data_, size_t size_, const std::optional<UInt128> & checksum_)
: BackupEntryFromMemory(String{reinterpret_cast<const char *>(data_), size_}, checksum_)
BackupEntryFromMemory::BackupEntryFromMemory(const void * data_, size_t size_)
: BackupEntryFromMemory(String{reinterpret_cast<const char *>(data_), size_})
{
}
BackupEntryFromMemory::BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_)
: data(std::move(data_)), checksum(checksum_)
BackupEntryFromMemory::BackupEntryFromMemory(String data_) : data(std::move(data_))
{
}
std::unique_ptr<SeekableReadBuffer> BackupEntryFromMemory::getReadBuffer() const
std::unique_ptr<SeekableReadBuffer> BackupEntryFromMemory::getReadBuffer(const ReadSettings &) const
{
return std::make_unique<ReadBufferFromString>(data);
}

View File

@ -1,39 +1,26 @@
#pragma once
#include <Backups/IBackupEntry.h>
#include <IO/ReadBufferFromString.h>
#include <Backups/BackupEntryWithChecksumCalculation.h>
namespace DB
{
/// Represents small preloaded data to be included in a backup.
class BackupEntryFromMemory : public IBackupEntry
class BackupEntryFromMemory : public BackupEntryWithChecksumCalculation<IBackupEntry>
{
public:
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
BackupEntryFromMemory(const void * data_, size_t size_, const std::optional<UInt128> & checksum_ = {});
explicit BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_ = {});
BackupEntryFromMemory(const void * data_, size_t size_);
explicit BackupEntryFromMemory(String data_);
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings &) const override;
UInt64 getSize() const override { return data.size(); }
std::optional<UInt128> getChecksum() const override { return checksum; }
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
String getFilePath() const override
{
return "";
}
DataSourceDescription getDataSourceDescription() const override
{
return DataSourceDescription{DataSourceType::RAM, "", false, false};
}
DiskPtr tryGetDiskIfExists() const override { return nullptr; }
DataSourceDescription getDataSourceDescription() const override { return DataSourceDescription{DataSourceType::RAM, "", false, false}; }
private:
const String data;
const std::optional<UInt128> checksum;
};
}

View File

@ -1,6 +1,9 @@
#include <Backups/BackupEntryFromSmallFile.h>
#include <Common/filesystemHelpers.h>
#include <Disks/DiskLocal.h>
#include <Disks/IDisk.h>
#include <Disks/IO/createReadBufferFromFileBase.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
@ -16,9 +19,9 @@ namespace
return s;
}
String readFile(const DiskPtr & disk, const String & file_path)
String readFile(const DiskPtr & disk, const String & file_path, bool copy_encrypted)
{
auto buf = disk->readFile(file_path);
auto buf = copy_encrypted ? disk->readEncryptedFile(file_path, {}) : disk->readFile(file_path);
String s;
readStringUntilEOF(s, *buf);
return s;
@ -26,15 +29,25 @@ namespace
}
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const String & file_path_, const std::optional<UInt128> & checksum_)
: BackupEntryFromMemory(readFile(file_path_), checksum_), file_path(file_path_)
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const String & file_path_)
: file_path(file_path_)
, data_source_description(DiskLocal::getLocalDataSourceDescription(file_path_))
, data(readFile(file_path_))
{
}
BackupEntryFromSmallFile::BackupEntryFromSmallFile(
const DiskPtr & disk_, const String & file_path_, const std::optional<UInt128> & checksum_)
: BackupEntryFromMemory(readFile(disk_, file_path_), checksum_), disk(disk_), file_path(file_path_)
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_)
: disk(disk_)
, file_path(file_path_)
, data_source_description(disk_->getDataSourceDescription())
, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
, data(readFile(disk_, file_path, copy_encrypted))
{
}
std::unique_ptr<SeekableReadBuffer> BackupEntryFromSmallFile::getReadBuffer(const ReadSettings &) const
{
return std::make_unique<ReadBufferFromString>(data);
}
}

View File

@ -1,6 +1,6 @@
#pragma once
#include <Backups/BackupEntryFromMemory.h>
#include <Backups/BackupEntryWithChecksumCalculation.h>
namespace DB
@ -10,25 +10,28 @@ using DiskPtr = std::shared_ptr<IDisk>;
/// Represents a file prepared to be included in a backup,
/// assuming that the file is small and can be easily loaded into memory.
class BackupEntryFromSmallFile : public BackupEntryFromMemory
class BackupEntryFromSmallFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
{
public:
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
explicit BackupEntryFromSmallFile(
const String & file_path_,
const std::optional<UInt128> & checksum_ = {});
explicit BackupEntryFromSmallFile(const String & file_path_);
BackupEntryFromSmallFile(const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_ = false);
BackupEntryFromSmallFile(
const DiskPtr & disk_,
const String & file_path_,
const std::optional<UInt128> & checksum_ = {});
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings &) const override;
UInt64 getSize() const override { return data.size(); }
DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
bool isEncryptedByDisk() const override { return copy_encrypted; }
bool isFromFile() const override { return true; }
DiskPtr getDisk() const override { return disk; }
String getFilePath() const override { return file_path; }
DiskPtr tryGetDiskIfExists() const override { return disk; }
private:
const DiskPtr disk;
const String file_path;
const DataSourceDescription data_source_description;
const bool copy_encrypted = false;
const String data;
};
}

View File

@ -0,0 +1,54 @@
#include <Backups/BackupEntryWithChecksumCalculation.h>
#include <IO/HashingReadBuffer.h>
namespace DB
{
template <typename Base>
UInt128 BackupEntryWithChecksumCalculation<Base>::getChecksum() const
{
std::lock_guard lock{checksum_calculation_mutex};
if (!calculated_checksum)
{
auto read_buffer = this->getReadBuffer(ReadSettings{}.adjustBufferSize(this->getSize()));
HashingReadBuffer hashing_read_buffer(*read_buffer);
hashing_read_buffer.ignoreAll();
calculated_checksum = hashing_read_buffer.getHash();
}
return *calculated_checksum;
}
template <typename Base>
std::optional<UInt128> BackupEntryWithChecksumCalculation<Base>::getPartialChecksum(size_t prefix_length) const
{
if (prefix_length == 0)
return 0;
size_t size = this->getSize();
if (prefix_length >= size)
return this->getChecksum();
std::lock_guard lock{checksum_calculation_mutex};
ReadSettings read_settings;
if (calculated_checksum)
read_settings.adjustBufferSize(calculated_checksum ? prefix_length : size);
auto read_buffer = this->getReadBuffer(read_settings);
HashingReadBuffer hashing_read_buffer(*read_buffer);
hashing_read_buffer.ignore(prefix_length);
auto partial_checksum = hashing_read_buffer.getHash();
if (!calculated_checksum)
{
hashing_read_buffer.ignoreAll();
calculated_checksum = hashing_read_buffer.getHash();
}
return partial_checksum;
}
template class BackupEntryWithChecksumCalculation<IBackupEntry>;
}

View File

@ -0,0 +1,22 @@
#pragma once
#include <Backups/IBackupEntry.h>
namespace DB
{
/// Calculates the checksum and the partial checksum for a backup entry based on ReadBuffer returned by getReadBuffer().
template <typename Base>
class BackupEntryWithChecksumCalculation : public Base
{
public:
UInt128 getChecksum() const override;
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override;
private:
mutable std::optional<UInt128> calculated_checksum;
mutable std::mutex checksum_calculation_mutex;
};
}

View File

@ -15,23 +15,33 @@ public:
BackupEntryWrappedWith(BackupEntryPtr entry_, T && custom_value_) : entry(entry_), custom_value(std::move(custom_value_)) { }
~BackupEntryWrappedWith() override = default;
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override { return entry->getReadBuffer(read_settings); }
UInt64 getSize() const override { return entry->getSize(); }
std::optional<UInt128> getChecksum() const override { return entry->getChecksum(); }
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override { return entry->getReadBuffer(); }
String getFilePath() const override { return entry->getFilePath(); }
DiskPtr tryGetDiskIfExists() const override { return entry->tryGetDiskIfExists(); }
UInt128 getChecksum() const override { return entry->getChecksum(); }
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override { return entry->getPartialChecksum(prefix_length); }
DataSourceDescription getDataSourceDescription() const override { return entry->getDataSourceDescription(); }
bool isEncryptedByDisk() const override { return entry->isEncryptedByDisk(); }
bool isFromFile() const override { return entry->isFromFile(); }
bool isFromImmutableFile() const override { return entry->isFromImmutableFile(); }
String getFilePath() const override { return entry->getFilePath(); }
DiskPtr getDisk() const override { return entry->getDisk(); }
private:
BackupEntryPtr entry;
T custom_value;
};
template <typename T>
BackupEntryPtr wrapBackupEntryWith(BackupEntryPtr && backup_entry, const T & custom_value)
{
return std::make_shared<BackupEntryWrappedWith<T>>(std::move(backup_entry), custom_value);
}
template <typename T>
void wrapBackupEntriesWith(std::vector<std::pair<String, BackupEntryPtr>> & backup_entries, const T & custom_value)
{
for (auto & [_, backup_entry] : backup_entries)
backup_entry = std::make_shared<BackupEntryWrappedWith<T>>(std::move(backup_entry), custom_value);
backup_entry = wrapBackupEntryWith(std::move(backup_entry), custom_value);
}
}

View File

@ -7,7 +7,7 @@
#include <Common/scope_guard_safe.h>
#include <Common/setThreadName.h>
#include <Common/ThreadPool.h>
#include <IO/HashingReadBuffer.h>
#include <base/hex.h>
namespace DB
@ -36,7 +36,7 @@ namespace
{
/// We cannot reuse base backup because our file is smaller
/// than file stored in previous backup
if (new_entry_info.size < base_backup_info.first)
if ((new_entry_info.size < base_backup_info.first) || !base_backup_info.first)
return CheckBackupResult::HasNothing;
if (base_backup_info.first == new_entry_info.size)
@ -48,45 +48,22 @@ namespace
struct ChecksumsForNewEntry
{
UInt128 full_checksum;
UInt128 prefix_checksum;
/// 0 is the valid checksum of empty data.
UInt128 full_checksum = 0;
/// std::nullopt here means that it's too difficult to calculate a partial checksum so it shouldn't be used.
std::optional<UInt128> prefix_checksum;
};
/// Calculate checksum for backup entry if it's empty.
/// Also able to calculate additional checksum of some prefix.
ChecksumsForNewEntry calculateNewEntryChecksumsIfNeeded(const BackupEntryPtr & entry, size_t prefix_size)
{
if (prefix_size > 0)
{
auto read_buffer = entry->getReadBuffer();
HashingReadBuffer hashing_read_buffer(*read_buffer);
hashing_read_buffer.ignore(prefix_size);
auto prefix_checksum = hashing_read_buffer.getHash();
if (entry->getChecksum() == std::nullopt)
{
hashing_read_buffer.ignoreAll();
auto full_checksum = hashing_read_buffer.getHash();
return ChecksumsForNewEntry{full_checksum, prefix_checksum};
}
else
{
return ChecksumsForNewEntry{*(entry->getChecksum()), prefix_checksum};
}
}
else
{
if (entry->getChecksum() == std::nullopt)
{
auto read_buffer = entry->getReadBuffer();
HashingReadBuffer hashing_read_buffer(*read_buffer);
hashing_read_buffer.ignoreAll();
return ChecksumsForNewEntry{hashing_read_buffer.getHash(), 0};
}
else
{
return ChecksumsForNewEntry{*(entry->getChecksum()), 0};
}
}
ChecksumsForNewEntry res;
/// The partial checksum should be calculated before the full checksum to enable optimization in BackupEntryWithChecksumCalculation.
res.prefix_checksum = entry->getPartialChecksum(prefix_size);
res.full_checksum = entry->getChecksum();
return res;
}
/// We store entries' file names in the backup without leading slashes.
@ -111,6 +88,7 @@ String BackupFileInfo::describe() const
result += fmt::format("base_checksum: {};\n", getHexUIntLowercase(checksum));
result += fmt::format("data_file_name: {};\n", data_file_name);
result += fmt::format("data_file_index: {};\n", data_file_index);
result += fmt::format("encrypted_by_disk: {};\n", encrypted_by_disk);
return result;
}
@ -122,6 +100,7 @@ BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const Backu
BackupFileInfo info;
info.file_name = adjusted_path;
info.size = backup_entry->getSize();
info.encrypted_by_disk = backup_entry->isEncryptedByDisk();
/// We don't set `info.data_file_name` and `info.data_file_index` in this function because they're set during backup coordination
/// (see the class BackupCoordinationFileInfos).
@ -139,7 +118,7 @@ BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const Backu
/// We have info about this file in base backup
/// If file has no checksum -- calculate and fill it.
if (base_backup_file_info.has_value())
if (base_backup_file_info)
{
LOG_TRACE(log, "File {} found in base backup, checking for equality", adjusted_path);
CheckBackupResult check_base = checkBaseBackupForFile(*base_backup_file_info, info);

View File

@ -35,6 +35,9 @@ struct BackupFileInfo
/// This field is set during backup coordination (see the class BackupCoordinationFileInfos).
size_t data_file_index = static_cast<size_t>(-1);
/// Whether this file is encrypted by an encrypted disk.
bool encrypted_by_disk = false;
struct LessByFileName
{
bool operator()(const BackupFileInfo & lhs, const BackupFileInfo & rhs) const { return (lhs.file_name < rhs.file_name); }

View File

@ -1,46 +0,0 @@
#include <Backups/BackupIO.h>
#include <IO/copyData.h>
#include <IO/WriteBufferFromFileBase.h>
#include <IO/SeekableReadBuffer.h>
#include <Interpreters/Context.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}
void IBackupReader::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings)
{
auto read_buffer = readFile(file_name);
auto write_buffer = destination_disk->writeFile(destination_path, std::min<size_t>(size, DBMS_DEFAULT_BUFFER_SIZE), write_mode, write_settings);
copyData(*read_buffer, *write_buffer, size);
write_buffer->finalize();
}
IBackupWriter::IBackupWriter(const ContextPtr & context_)
: read_settings(context_->getBackupReadSettings())
, has_throttling(static_cast<bool>(context_->getBackupsThrottler()))
{}
void IBackupWriter::copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name)
{
auto read_buffer = create_read_buffer();
if (offset)
read_buffer->seek(offset, SEEK_SET);
auto write_buffer = writeFile(dest_file_name);
copyData(*read_buffer, *write_buffer, size);
write_buffer->finalize();
}
void IBackupWriter::copyFileNative(
DiskPtr /* src_disk */, const String & /* src_file_name */, UInt64 /* src_offset */, UInt64 /* src_size */, const String & /* dest_file_name */)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Native copy not implemented for backup writer");
}
}

View File

@ -1,58 +1,72 @@
#pragma once
#include <Core/Types.h>
#include <Disks/DiskType.h>
#include <Disks/IDisk.h>
#include <IO/ReadSettings.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
class IDisk;
using DiskPtr = std::shared_ptr<IDisk>;
class SeekableReadBuffer;
class WriteBuffer;
enum class WriteMode;
struct WriteSettings;
struct ReadSettings;
/// Represents operations of loading from disk or downloading for reading a backup.
class IBackupReader /// BackupReaderFile, BackupReaderDisk
/// See also implementations: BackupReaderFile, BackupReaderDisk.
class IBackupReader
{
public:
virtual ~IBackupReader() = default;
virtual bool fileExists(const String & file_name) = 0;
virtual UInt64 getFileSize(const String & file_name) = 0;
virtual std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) = 0;
virtual void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings);
virtual DataSourceDescription getDataSourceDescription() const = 0;
/// The function copyFileToDisk() can be much faster than reading the file with readFile() and then writing it to some disk.
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
/// Parameters:
/// `encrypted_in_backup` specifies whether this file is encrypted in the backup, so it shouldn't be encrypted again while restoring to an encrypted disk.
virtual void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) = 0;
virtual const ReadSettings & getReadSettings() const = 0;
virtual const WriteSettings & getWriteSettings() const = 0;
virtual size_t getWriteBufferSize() const = 0;
};
/// Represents operations of storing to disk or uploading for writing a backup.
class IBackupWriter /// BackupWriterFile, BackupWriterDisk
/// See also implementations: BackupWriterFile, BackupWriterDisk
class IBackupWriter
{
public:
using CreateReadBufferFunction = std::function<std::unique_ptr<SeekableReadBuffer>()>;
explicit IBackupWriter(const ContextPtr & context_);
virtual ~IBackupWriter() = default;
virtual bool fileExists(const String & file_name) = 0;
virtual UInt64 getFileSize(const String & file_name) = 0;
virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0;
virtual std::unique_ptr<WriteBuffer> writeFile(const String & file_name) = 0;
using CreateReadBufferFunction = std::function<std::unique_ptr<SeekableReadBuffer>()>;
virtual void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) = 0;
/// The function copyFileFromDisk() can be much faster than copyDataToFile()
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
/// Parameters:
/// `start_pos` and `length` specify a part of the file on `src_disk` to copy to the backup.
/// `copy_encrypted` specifies whether this function should copy encrypted data of the file `src_path` to the backup.
virtual void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length) = 0;
virtual void removeFile(const String & file_name) = 0;
virtual void removeFiles(const Strings & file_names) = 0;
virtual DataSourceDescription getDataSourceDescription() const = 0;
virtual void copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name);
virtual bool supportNativeCopy(DataSourceDescription /* data_source_description */) const { return false; }
/// Copy file using native copy (optimized for S3 to use CopyObject)
///
/// NOTE: It still may fall back to copyDataToFile() if native copy is not possible:
/// - different buckets
/// - throttling had been requested
virtual void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name);
protected:
const ReadSettings read_settings;
const bool has_throttling;
virtual const ReadSettings & getReadSettings() const = 0;
virtual const WriteSettings & getWriteSettings() const = 0;
virtual size_t getWriteBufferSize() const = 0;
};
}

View File

@ -0,0 +1,95 @@
#include <Backups/BackupIO_Default.h>
#include <Disks/IDisk.h>
#include <IO/copyData.h>
#include <IO/WriteBufferFromFileBase.h>
#include <IO/SeekableReadBuffer.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h>
namespace DB
{
BackupReaderDefault::BackupReaderDefault(Poco::Logger * log_, const ContextPtr & context_)
: log(log_)
, read_settings(context_->getBackupReadSettings())
, write_settings(context_->getWriteSettings())
, write_buffer_size(DBMS_DEFAULT_BUFFER_SIZE)
{
}
void BackupReaderDefault::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
{
LOG_TRACE(log, "Copying file {} to disk {} through buffers", path_in_backup, destination_disk->getName());
auto read_buffer = readFile(path_in_backup);
std::unique_ptr<WriteBuffer> write_buffer;
auto buf_size = std::min(file_size, write_buffer_size);
if (encrypted_in_backup)
write_buffer = destination_disk->writeEncryptedFile(destination_path, buf_size, write_mode, write_settings);
else
write_buffer = destination_disk->writeFile(destination_path, buf_size, write_mode, write_settings);
copyData(*read_buffer, *write_buffer, file_size);
write_buffer->finalize();
}
BackupWriterDefault::BackupWriterDefault(Poco::Logger * log_, const ContextPtr & context_)
: log(log_)
, read_settings(context_->getBackupReadSettings())
, write_settings(context_->getWriteSettings())
, write_buffer_size(DBMS_DEFAULT_BUFFER_SIZE)
{
}
bool BackupWriterDefault::fileContentsEqual(const String & file_name, const String & expected_file_contents)
{
if (!fileExists(file_name))
return false;
try
{
auto in = readFile(file_name, expected_file_contents.size());
String actual_file_contents(expected_file_contents.size(), ' ');
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
&& (actual_file_contents == expected_file_contents) && in->eof();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
return false;
}
}
void BackupWriterDefault::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
{
auto read_buffer = create_read_buffer();
if (start_pos)
read_buffer->seek(start_pos, SEEK_SET);
auto write_buffer = writeFile(path_in_backup);
copyData(*read_buffer, *write_buffer, length);
write_buffer->finalize();
}
void BackupWriterDefault::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length)
{
LOG_TRACE(log, "Copying file {} from disk {} through buffers", src_path, src_disk->getName());
auto create_read_buffer = [src_disk, src_path, copy_encrypted, settings = read_settings.adjustBufferSize(start_pos + length)]
{
if (copy_encrypted)
return src_disk->readEncryptedFile(src_path, settings);
else
return src_disk->readFile(src_path, settings);
};
copyDataToFile(path_in_backup, create_read_buffer, start_pos, length);
}
}
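
The comparison in fileContentsEqual() above relies on reading exactly as many bytes as the expected string and then requiring EOF, so a file that merely starts with the expected contents does not pass. The following standalone sketch mirrors that three-step check with std::ifstream instead of ClickHouse's ReadBuffer; the function name and the use of iostreams are purely illustrative.

#include <fstream>
#include <string>

// Toy stand-in for BackupWriterDefault::fileContentsEqual(): returns true only if
// the file holds exactly `expected` - same bytes and same length.
static bool fileContentsEqual(const std::string & path, const std::string & expected)
{
    std::ifstream in(path, std::ios::binary);
    if (!in)
        return false;                       // file is missing or unreadable

    std::string actual(expected.size(), ' ');
    in.read(actual.data(), static_cast<std::streamsize>(actual.size()));

    return static_cast<size_t>(in.gcount()) == expected.size()    // read the full expected length
        && actual == expected                                      // bytes match
        && in.peek() == std::ifstream::traits_type::eof();         // and nothing follows
}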

View File

@ -0,0 +1,73 @@
#pragma once
#include <Backups/BackupIO.h>
#include <IO/ReadSettings.h>
#include <IO/WriteSettings.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
class IDisk;
using DiskPtr = std::shared_ptr<IDisk>;
class ReadBuffer;
class SeekableReadBuffer;
class WriteBuffer;
enum class WriteMode;
/// Represents operations of loading from disk or downloading for reading a backup.
class BackupReaderDefault : public IBackupReader
{
public:
BackupReaderDefault(Poco::Logger * log_, const ContextPtr & context_);
~BackupReaderDefault() override = default;
/// The function copyFileToDisk() can be much faster than reading the file with readFile() and then writing it to some disk.
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
/// Parameters:
/// `encrypted_in_backup` specifies whether this file is encrypted in the backup, so it shouldn't be encrypted again while restoring to an encrypted disk.
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
const ReadSettings & getReadSettings() const override { return read_settings; }
const WriteSettings & getWriteSettings() const override { return write_settings; }
size_t getWriteBufferSize() const override { return write_buffer_size; }
protected:
Poco::Logger * const log;
const ReadSettings read_settings;
/// The write settings are used to write to the destination disk in copyFileToDisk().
const WriteSettings write_settings;
const size_t write_buffer_size;
};
/// Represents operations of storing to disk or uploading for writing a backup.
class BackupWriterDefault : public IBackupWriter
{
public:
BackupWriterDefault(Poco::Logger * log_, const ContextPtr & context_);
~BackupWriterDefault() override = default;
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) override;
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path, bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
const ReadSettings & getReadSettings() const override { return read_settings; }
const WriteSettings & getWriteSettings() const override { return write_settings; }
size_t getWriteBufferSize() const override { return write_buffer_size; }
protected:
/// Here readFile() is used only to implement fileContentsEqual().
virtual std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) = 0;
Poco::Logger * const log;
/// The read settings are used to read from the source disk in copyFileFromDisk().
const ReadSettings read_settings;
const WriteSettings write_settings;
const size_t write_buffer_size;
};
}
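
Both Default classes funnel their fallback path through the same pattern: open a read buffer, seek to the starting offset, stream a bounded amount of data, and finalize the destination, with the buffer capped by the expected amount of data (the role of adjustBufferSize() and write_buffer_size above). A rough standalone equivalent with plain iostreams follows; the function name and the 1 MiB stand-in for DBMS_DEFAULT_BUFFER_SIZE are assumptions made for this sketch only.

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

// Sketch of the "copy through buffers" fallback: copy `length` bytes of `src`,
// starting at `start_pos`, into `dst`, using a buffer no larger than needed.
static void copyRangeThroughBuffers(const std::string & src, const std::string & dst,
                                    std::uint64_t start_pos, std::uint64_t length)
{
    constexpr std::size_t default_buffer_size = 1 << 20;        // stand-in for DBMS_DEFAULT_BUFFER_SIZE
    const std::size_t buf_size = static_cast<std::size_t>(std::min<std::uint64_t>(default_buffer_size, length));

    std::ifstream in(src, std::ios::binary);
    in.seekg(static_cast<std::streamoff>(start_pos));           // analogous to read_buffer->seek(start_pos, SEEK_SET)

    std::ofstream out(dst, std::ios::binary | std::ios::trunc); // analogous to WriteMode::Rewrite
    std::vector<char> buffer(std::max<std::size_t>(buf_size, 1));

    std::uint64_t left = length;
    while (left > 0 && in)
    {
        const std::size_t chunk = static_cast<std::size_t>(std::min<std::uint64_t>(buffer.size(), left));
        in.read(buffer.data(), static_cast<std::streamsize>(chunk));
        const std::streamsize got = in.gcount();
        if (got <= 0)
            break;
        out.write(buffer.data(), got);
        left -= static_cast<std::uint64_t>(got);
    }
    out.flush();                                                 // analogous to write_buffer->finalize()
}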

View File

@ -8,13 +8,11 @@
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & path_)
: disk(disk_), path(path_), log(&Poco::Logger::get("BackupReaderDisk"))
BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_)
: BackupReaderDefault(&Poco::Logger::get("BackupReaderDisk"), context_)
, disk(disk_)
, root_path(root_path_)
, data_source_description(disk->getDataSourceDescription())
{
}
@ -22,38 +20,47 @@ BackupReaderDisk::~BackupReaderDisk() = default;
bool BackupReaderDisk::fileExists(const String & file_name)
{
return disk->exists(path / file_name);
return disk->exists(root_path / file_name);
}
UInt64 BackupReaderDisk::getFileSize(const String & file_name)
{
return disk->getFileSize(path / file_name);
return disk->getFileSize(root_path / file_name);
}
std::unique_ptr<SeekableReadBuffer> BackupReaderDisk::readFile(const String & file_name)
{
return disk->readFile(path / file_name);
return disk->readFile(root_path / file_name, read_settings);
}
void BackupReaderDisk::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings)
void BackupReaderDisk::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
{
if (write_mode == WriteMode::Rewrite)
/// Use IDisk::copyFile() as a more optimal way to copy a file if it's possible.
/// However IDisk::copyFile() can't use throttling for reading, and can't copy an encrypted file or do appending.
bool has_throttling = disk->isRemote() ? static_cast<bool>(read_settings.remote_throttler) : static_cast<bool>(read_settings.local_throttler);
if (!has_throttling && (write_mode == WriteMode::Rewrite) && !encrypted_in_backup)
{
LOG_TRACE(log, "Copying {}/{} from disk {} to {} by the disk", path, file_name, disk->getName(), destination_disk->getName());
disk->copyFile(path / file_name, *destination_disk, destination_path, write_settings);
return;
auto destination_data_source_description = destination_disk->getDataSourceDescription();
if (destination_data_source_description.sameKind(data_source_description) && !data_source_description.is_encrypted)
{
/// Use more optimal way.
LOG_TRACE(log, "Copying file {} from disk {} to disk {}", path_in_backup, disk->getName(), destination_disk->getName());
disk->copyFile(root_path / path_in_backup, *destination_disk, destination_path, write_settings);
return; /// copied!
}
}
LOG_TRACE(log, "Copying {}/{} from disk {} to {} through buffers", path, file_name, disk->getName(), destination_disk->getName());
IBackupReader::copyFileToDisk(file_name, size, destination_disk, destination_path, write_mode, write_settings);
/// Fallback to copy through buffers.
BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
}
BackupWriterDisk::BackupWriterDisk(const DiskPtr & disk_, const String & path_, const ContextPtr & context_)
: IBackupWriter(context_)
BackupWriterDisk::BackupWriterDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_)
: BackupWriterDefault(&Poco::Logger::get("BackupWriterDisk"), context_)
, disk(disk_)
, path(path_)
, root_path(root_path_)
, data_source_description(disk->getDataSourceDescription())
{
}
@ -61,85 +68,64 @@ BackupWriterDisk::~BackupWriterDisk() = default;
bool BackupWriterDisk::fileExists(const String & file_name)
{
return disk->exists(path / file_name);
return disk->exists(root_path / file_name);
}
UInt64 BackupWriterDisk::getFileSize(const String & file_name)
{
return disk->getFileSize(path / file_name);
return disk->getFileSize(root_path / file_name);
}
bool BackupWriterDisk::fileContentsEqual(const String & file_name, const String & expected_file_contents)
std::unique_ptr<ReadBuffer> BackupWriterDisk::readFile(const String & file_name, size_t expected_file_size)
{
if (!disk->exists(path / file_name))
return false;
try
{
auto in = disk->readFile(path / file_name);
String actual_file_contents(expected_file_contents.size(), ' ');
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
&& (actual_file_contents == expected_file_contents) && in->eof();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
return false;
}
return disk->readFile(root_path / file_name, read_settings.adjustBufferSize(expected_file_size));
}
std::unique_ptr<WriteBuffer> BackupWriterDisk::writeFile(const String & file_name)
{
auto file_path = path / file_name;
auto file_path = root_path / file_name;
disk->createDirectories(file_path.parent_path());
return disk->writeFile(file_path);
return disk->writeFile(file_path, write_buffer_size, WriteMode::Rewrite, write_settings);
}
void BackupWriterDisk::removeFile(const String & file_name)
{
disk->removeFileIfExists(path / file_name);
if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
disk->removeDirectory(path);
disk->removeFileIfExists(root_path / file_name);
if (disk->isDirectory(root_path) && disk->isDirectoryEmpty(root_path))
disk->removeDirectory(root_path);
}
void BackupWriterDisk::removeFiles(const Strings & file_names)
{
for (const auto & file_name : file_names)
disk->removeFileIfExists(path / file_name);
if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
disk->removeDirectory(path);
disk->removeFileIfExists(root_path / file_name);
if (disk->isDirectory(root_path) && disk->isDirectoryEmpty(root_path))
disk->removeDirectory(root_path);
}
DataSourceDescription BackupWriterDisk::getDataSourceDescription() const
void BackupWriterDisk::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length)
{
return disk->getDataSourceDescription();
}
DataSourceDescription BackupReaderDisk::getDataSourceDescription() const
{
return disk->getDataSourceDescription();
}
bool BackupWriterDisk::supportNativeCopy(DataSourceDescription data_source_description) const
{
return data_source_description == disk->getDataSourceDescription();
}
void BackupWriterDisk::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
{
if (!src_disk)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk");
if (has_throttling || (src_offset != 0) || (src_size != src_disk->getFileSize(src_file_name)))
/// Use IDisk::copyFile() as a more optimal way to copy a file if it's possible.
/// However IDisk::copyFile() can't use throttling for reading, and can't copy an encrypted file or copy a part of the file.
bool has_throttling = src_disk->isRemote() ? static_cast<bool>(read_settings.remote_throttler) : static_cast<bool>(read_settings.local_throttler);
if (!has_throttling && !start_pos && !copy_encrypted)
{
auto create_read_buffer = [this, src_disk, src_file_name] { return src_disk->readFile(src_file_name, read_settings); };
copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
return;
auto source_data_source_description = src_disk->getDataSourceDescription();
if (source_data_source_description.sameKind(data_source_description) && !source_data_source_description.is_encrypted
&& (length == src_disk->getFileSize(src_path)))
{
/// Use more optimal way.
LOG_TRACE(log, "Copying file {} from disk {} to disk {}", src_path, src_disk->getName(), disk->getName());
auto dest_file_path = root_path / path_in_backup;
disk->createDirectories(dest_file_path.parent_path());
src_disk->copyFile(src_path, *disk, dest_file_path, write_settings);
return; /// copied!
}
}
auto file_path = path / dest_file_name;
disk->createDirectories(file_path.parent_path());
src_disk->copyFile(src_file_name, *disk, file_path);
/// Fallback to copy through buffers.
BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
}
}
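
The new copyFileToDisk()/copyFileFromDisk() above follow one shape: check a few disqualifying conditions (throttling requested, encrypted data, appending or a partial copy, different storage kinds) and only then take the one-call IDisk::copyFile() fast path, otherwise fall through to the buffered copy. A condensed sketch of that decision is shown below; CopyRequest and canUseDiskNativeCopy are hypothetical names standing in for the fields the real code consults.

#include <cstdint>

// Hypothetical, simplified stand-ins for the fields the real code checks.
struct CopyRequest
{
    bool has_throttling = false;   // a read throttler is configured for the source
    bool copy_encrypted = false;   // data must be copied in its encrypted form
    bool whole_file = true;        // start_pos == 0 && length == file size (no append, no partial copy)
    bool same_kind = true;         // source/destination DataSourceDescription::sameKind()
};

// Mirrors the structure of the decision in BackupWriterDisk::copyFileFromDisk():
// the fast path is used only when nothing rules it out; otherwise the caller
// falls back to copying through buffers.
inline bool canUseDiskNativeCopy(const CopyRequest & req)
{
    return !req.has_throttling
        && !req.copy_encrypted
        && req.whole_file
        && req.same_kind;
}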

View File

@ -1,53 +1,58 @@
#pragma once
#include <Backups/BackupIO_Default.h>
#include <Disks/DiskType.h>
#include <filesystem>
#include <Backups/BackupIO.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
class IDisk;
using DiskPtr = std::shared_ptr<IDisk>;
class BackupReaderDisk : public IBackupReader
class BackupReaderDisk : public BackupReaderDefault
{
public:
BackupReaderDisk(const DiskPtr & disk_, const String & path_);
BackupReaderDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_);
~BackupReaderDisk() override;
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) override;
DataSourceDescription getDataSourceDescription() const override;
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
private:
DiskPtr disk;
std::filesystem::path path;
Poco::Logger * log;
const DiskPtr disk;
const std::filesystem::path root_path;
const DataSourceDescription data_source_description;
};
class BackupWriterDisk : public IBackupWriter
class BackupWriterDisk : public BackupWriterDefault
{
public:
BackupWriterDisk(const DiskPtr & disk_, const String & path_, const ContextPtr & context_);
BackupWriterDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_);
~BackupWriterDisk() override;
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
void removeFile(const String & file_name) override;
void removeFiles(const Strings & file_names) override;
DataSourceDescription getDataSourceDescription() const override;
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;
private:
DiskPtr disk;
std::filesystem::path path;
std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
const DiskPtr disk;
const std::filesystem::path root_path;
const DataSourceDescription data_source_description;
};
}

View File

@ -1,9 +1,7 @@
#include <Backups/BackupIO_File.h>
#include <Disks/IDisk.h>
#include <Disks/DiskLocal.h>
#include <Disks/IO/createReadBufferFromFileBase.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/copyData.h>
#include <Common/filesystemHelpers.h>
#include <Common/logger_useful.h>
@ -12,158 +10,146 @@ namespace fs = std::filesystem;
namespace DB
{
BackupReaderFile::BackupReaderFile(const String & path_) : path(path_), log(&Poco::Logger::get("BackupReaderFile"))
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
BackupReaderFile::BackupReaderFile(const String & root_path_, const ContextPtr & context_)
: BackupReaderDefault(&Poco::Logger::get("BackupReaderFile"), context_)
, root_path(root_path_)
, data_source_description(DiskLocal::getLocalDataSourceDescription(root_path))
{
}
BackupReaderFile::~BackupReaderFile() = default;
bool BackupReaderFile::fileExists(const String & file_name)
{
return fs::exists(path / file_name);
return fs::exists(root_path / file_name);
}
UInt64 BackupReaderFile::getFileSize(const String & file_name)
{
return fs::file_size(path / file_name);
return fs::file_size(root_path / file_name);
}
std::unique_ptr<SeekableReadBuffer> BackupReaderFile::readFile(const String & file_name)
{
return createReadBufferFromFileBase(path / file_name, {});
return createReadBufferFromFileBase(root_path / file_name, read_settings);
}
void BackupReaderFile::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings)
void BackupReaderFile::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
{
if (destination_disk->getDataSourceDescription() == getDataSourceDescription())
/// std::filesystem::copy() can copy from the filesystem only, and can't do throttling or appending.
bool has_throttling = static_cast<bool>(read_settings.local_throttler);
if (!has_throttling && (write_mode == WriteMode::Rewrite))
{
/// Use more optimal way.
LOG_TRACE(log, "Copying {}/{} to disk {} locally", path, file_name, destination_disk->getName());
fs::copy(path / file_name, fullPath(destination_disk, destination_path), fs::copy_options::overwrite_existing);
return;
auto destination_data_source_description = destination_disk->getDataSourceDescription();
if (destination_data_source_description.sameKind(data_source_description)
&& (destination_data_source_description.is_encrypted == encrypted_in_backup))
{
/// Use more optimal way.
LOG_TRACE(log, "Copying file {} to disk {} locally", path_in_backup, destination_disk->getName());
auto write_blob_function = [abs_source_path = root_path / path_in_backup, file_size](
const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
{
/// For local disks the size of a blob path is expected to be 1.
if (blob_path.size() != 1 || mode != WriteMode::Rewrite)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Blob writing function called with unexpected blob_path.size={} or mode={}",
blob_path.size(), mode);
fs::copy(abs_source_path, blob_path.at(0), fs::copy_options::overwrite_existing);
return file_size;
};
destination_disk->writeFileUsingBlobWritingFunction(destination_path, write_mode, write_blob_function);
return; /// copied!
}
}
LOG_TRACE(log, "Copying {}/{} to disk {} through buffers", path, file_name, destination_disk->getName());
IBackupReader::copyFileToDisk(path / file_name, size, destination_disk, destination_path, write_mode, write_settings);
/// Fallback to copy through buffers.
BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
}
BackupWriterFile::BackupWriterFile(const String & path_, const ContextPtr & context_)
: IBackupWriter(context_)
, path(path_)
BackupWriterFile::BackupWriterFile(const String & root_path_, const ContextPtr & context_)
: BackupWriterDefault(&Poco::Logger::get("BackupWriterFile"), context_)
, root_path(root_path_)
, data_source_description(DiskLocal::getLocalDataSourceDescription(root_path))
{
}
BackupWriterFile::~BackupWriterFile() = default;
bool BackupWriterFile::fileExists(const String & file_name)
{
return fs::exists(path / file_name);
return fs::exists(root_path / file_name);
}
UInt64 BackupWriterFile::getFileSize(const String & file_name)
{
return fs::file_size(path / file_name);
return fs::file_size(root_path / file_name);
}
bool BackupWriterFile::fileContentsEqual(const String & file_name, const String & expected_file_contents)
std::unique_ptr<ReadBuffer> BackupWriterFile::readFile(const String & file_name, size_t expected_file_size)
{
if (!fs::exists(path / file_name))
return false;
try
{
auto in = createReadBufferFromFileBase(path / file_name, {});
String actual_file_contents(expected_file_contents.size(), ' ');
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
&& (actual_file_contents == expected_file_contents) && in->eof();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
return false;
}
return createReadBufferFromFileBase(root_path / file_name, read_settings.adjustBufferSize(expected_file_size));
}
std::unique_ptr<WriteBuffer> BackupWriterFile::writeFile(const String & file_name)
{
auto file_path = path / file_name;
auto file_path = root_path / file_name;
fs::create_directories(file_path.parent_path());
return std::make_unique<WriteBufferFromFile>(file_path);
return std::make_unique<WriteBufferFromFile>(file_path, write_buffer_size, -1, write_settings.local_throttler);
}
void BackupWriterFile::removeFile(const String & file_name)
{
fs::remove(path / file_name);
if (fs::is_directory(path) && fs::is_empty(path))
fs::remove(path);
fs::remove(root_path / file_name);
if (fs::is_directory(root_path) && fs::is_empty(root_path))
fs::remove(root_path);
}
void BackupWriterFile::removeFiles(const Strings & file_names)
{
for (const auto & file_name : file_names)
fs::remove(path / file_name);
if (fs::is_directory(path) && fs::is_empty(path))
fs::remove(path);
fs::remove(root_path / file_name);
if (fs::is_directory(root_path) && fs::is_empty(root_path))
fs::remove(root_path);
}
DataSourceDescription BackupWriterFile::getDataSourceDescription() const
void BackupWriterFile::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length)
{
DataSourceDescription data_source_description;
data_source_description.type = DataSourceType::Local;
if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value())
data_source_description.description = *block_device_id;
else
data_source_description.description = path;
data_source_description.is_encrypted = false;
data_source_description.is_cached = false;
return data_source_description;
}
DataSourceDescription BackupReaderFile::getDataSourceDescription() const
{
DataSourceDescription data_source_description;
data_source_description.type = DataSourceType::Local;
if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value())
data_source_description.description = *block_device_id;
else
data_source_description.description = path;
data_source_description.is_encrypted = false;
data_source_description.is_cached = false;
return data_source_description;
}
bool BackupWriterFile::supportNativeCopy(DataSourceDescription data_source_description) const
{
return data_source_description == getDataSourceDescription();
}
void BackupWriterFile::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
{
std::string abs_source_path;
if (src_disk)
abs_source_path = fullPath(src_disk, src_file_name);
else
abs_source_path = fs::absolute(src_file_name);
if (has_throttling || (src_offset != 0) || (src_size != fs::file_size(abs_source_path)))
/// std::filesystem::copy() can copy from the filesystem only, and can't do throttling or copy a part of the file.
bool has_throttling = static_cast<bool>(read_settings.local_throttler);
if (!has_throttling)
{
auto create_read_buffer = [this, abs_source_path] { return createReadBufferFromFileBase(abs_source_path, read_settings); };
copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
return;
auto source_data_source_description = src_disk->getDataSourceDescription();
if (source_data_source_description.sameKind(data_source_description)
&& (source_data_source_description.is_encrypted == copy_encrypted))
{
/// std::filesystem::copy() can copy from a single file only.
if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 1)
{
auto abs_source_path = blob_path[0];
/// std::filesystem::copy() can copy a file as a whole only.
if ((start_pos == 0) && (length == fs::file_size(abs_source_path)))
{
/// Use more optimal way.
LOG_TRACE(log, "Copying file {} from disk {} locally", src_path, src_disk->getName());
auto abs_dest_path = root_path / path_in_backup;
fs::create_directories(abs_dest_path.parent_path());
fs::copy(abs_source_path, abs_dest_path, fs::copy_options::overwrite_existing);
return; /// copied!
}
}
}
}
auto file_path = path / dest_file_name;
fs::create_directories(file_path.parent_path());
fs::copy(abs_source_path, file_path, fs::copy_options::overwrite_existing);
/// Fallback to copy through buffers.
BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
}
}
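
writeFileUsingBlobWritingFunction() inverts the usual flow: the destination disk resolves where the blob should physically live and then calls back into the supplied lambda, which performs the copy and reports the number of bytes written. A stripped-down sketch of that callback contract is given below, with std::filesystem standing in for IDisk and a single-element blob path as in the local-disk case above; all names here are illustrative only.

#include <cstdint>
#include <filesystem>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// The callback receives the resolved blob path(s) and returns how many bytes it wrote.
using WriteBlobFunction = std::function<std::size_t(const std::vector<std::string> & blob_path)>;

// Stand-in for IDisk::writeFileUsingBlobWritingFunction(): a "disk" that resolves
// the logical path to one physical file and hands it to the callback.
static void writeFileUsingBlobWritingFunction(const fs::path & root, const std::string & logical_path,
                                              const WriteBlobFunction & write_blob_function)
{
    const fs::path physical = root / logical_path;
    fs::create_directories(physical.parent_path());
    write_blob_function({physical.string()});       // local disks expose exactly one blob path
}

// Usage, mirroring the fast path of BackupReaderFile::copyFileToDisk(): the lambda
// copies the source file straight onto the resolved blob and returns its size.
static void copyFileLocally(const fs::path & abs_source_path, const fs::path & dst_root, const std::string & dst_name)
{
    auto write_blob = [&](const std::vector<std::string> & blob_path) -> std::size_t
    {
        if (blob_path.size() != 1)
            throw std::logic_error("expected a single-element blob path for a local disk");
        fs::copy(abs_source_path, blob_path.front(), fs::copy_options::overwrite_existing);
        return static_cast<std::size_t>(fs::file_size(abs_source_path));
    };
    writeFileUsingBlobWritingFunction(dst_root, dst_name, write_blob);
}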

View File

@ -1,48 +1,51 @@
#pragma once
#include <Backups/BackupIO_Default.h>
#include <Disks/DiskType.h>
#include <filesystem>
#include <Backups/BackupIO.h>
#include <Interpreters/Context_fwd.h>
namespace DB
{
class BackupReaderFile : public IBackupReader
class BackupReaderFile : public BackupReaderDefault
{
public:
explicit BackupReaderFile(const String & path_);
~BackupReaderFile() override;
explicit BackupReaderFile(const String & root_path_, const ContextPtr & context_);
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) override;
DataSourceDescription getDataSourceDescription() const override;
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
private:
std::filesystem::path path;
Poco::Logger * log;
const std::filesystem::path root_path;
const DataSourceDescription data_source_description;
};
class BackupWriterFile : public IBackupWriter
class BackupWriterFile : public BackupWriterDefault
{
public:
explicit BackupWriterFile(const String & path_, const ContextPtr & context_);
~BackupWriterFile() override;
BackupWriterFile(const String & root_path_, const ContextPtr & context_);
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
void removeFile(const String & file_name) override;
void removeFiles(const Strings & file_names) override;
DataSourceDescription getDataSourceDescription() const override;
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;
private:
std::filesystem::path path;
std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
const std::filesystem::path root_path;
const DataSourceDescription data_source_description;
};
}

View File

@ -2,7 +2,6 @@
#if USE_AWS_S3
#include <Common/quoteString.h>
#include <Disks/ObjectStorages/S3/copyS3FileToDisk.h>
#include <Interpreters/threadPoolCallbackRunner.h>
#include <Interpreters/Context.h>
#include <IO/SharedThreadPools.h>
@ -12,6 +11,7 @@
#include <IO/S3/copyS3File.h>
#include <IO/S3/Client.h>
#include <IO/S3/Credentials.h>
#include <Disks/IDisk.h>
#include <Poco/Util/AbstractConfiguration.h>
@ -102,21 +102,15 @@ namespace
BackupReaderS3::BackupReaderS3(
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
: s3_uri(s3_uri_)
: BackupReaderDefault(&Poco::Logger::get("BackupReaderS3"), context_)
, s3_uri(s3_uri_)
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
, read_settings(context_->getReadSettings())
, request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
, log(&Poco::Logger::get("BackupReaderS3"))
, data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
{
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
}
DataSourceDescription BackupReaderS3::getDataSourceDescription() const
{
return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
}
BackupReaderS3::~BackupReaderS3() = default;
bool BackupReaderS3::fileExists(const String & file_name)
@ -138,75 +132,98 @@ std::unique_ptr<SeekableReadBuffer> BackupReaderS3::readFile(const String & file
client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings);
}
void BackupReaderS3::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings)
void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
{
LOG_TRACE(log, "Copying {} to disk {}", file_name, destination_disk->getName());
/// Use the native copy as a more optimal way to copy a file from S3 to S3 if it's possible.
/// We don't check for `has_throttling` here because the native copy barely uses the network.
auto destination_data_source_description = destination_disk->getDataSourceDescription();
if (destination_data_source_description.sameKind(data_source_description)
&& (destination_data_source_description.is_encrypted == encrypted_in_backup))
{
/// Use native copy, the more optimal way.
LOG_TRACE(log, "Copying {} from S3 to disk {} using native copy", path_in_backup, destination_disk->getName());
auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> & object_attributes) -> size_t
{
/// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
if (blob_path.size() != 2 || mode != WriteMode::Rewrite)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Blob writing function called with unexpected blob_path.size={} or mode={}",
blob_path.size(), mode);
copyS3FileToDisk(
client,
s3_uri.bucket,
fs::path(s3_uri.key) / file_name,
s3_uri.version_id,
0,
size,
destination_disk,
destination_path,
write_mode,
read_settings,
write_settings,
request_settings,
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupReaderS3"));
copyS3File(
client,
s3_uri.bucket,
fs::path(s3_uri.key) / path_in_backup,
0,
file_size,
/* dest_bucket= */ blob_path[1],
/* dest_key= */ blob_path[0],
request_settings,
object_attributes,
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupReaderS3"),
/* for_disk_s3= */ true);
return file_size;
};
destination_disk->writeFileUsingBlobWritingFunction(destination_path, write_mode, write_blob_function);
return; /// copied!
}
/// Fallback to copy through buffers.
BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
}
BackupWriterS3::BackupWriterS3(
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
: IBackupWriter(context_)
: BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
, s3_uri(s3_uri_)
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
, request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
, log(&Poco::Logger::get("BackupWriterS3"))
, data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
{
request_settings.updateFromSettings(context_->getSettingsRef());
request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
}
DataSourceDescription BackupWriterS3::getDataSourceDescription() const
void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length)
{
return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
}
bool BackupWriterS3::supportNativeCopy(DataSourceDescription data_source_description) const
{
return getDataSourceDescription() == data_source_description;
}
void BackupWriterS3::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
{
if (!src_disk)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk");
auto objects = src_disk->getStorageObjects(src_file_name);
if (objects.size() > 1)
/// Use the native copy as a more optimal way to copy a file from S3 to S3 if it's possible.
/// We don't check for `has_throttling` here because the native copy barely uses the network.
auto source_data_source_description = src_disk->getDataSourceDescription();
if (source_data_source_description.sameKind(data_source_description) && (source_data_source_description.is_encrypted == copy_encrypted))
{
auto create_read_buffer = [this, src_disk, src_file_name] { return src_disk->readFile(src_file_name, read_settings); };
copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
}
else
{
auto object_storage = src_disk->getObjectStorage();
std::string src_bucket = object_storage->getObjectsNamespace();
auto file_path = fs::path(s3_uri.key) / dest_file_name;
copyS3File(client, src_bucket, objects[0].remote_path, src_offset, src_size, s3_uri.bucket, file_path, request_settings, {},
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
/// getBlobPath() can return more than 2 elements if the file is stored as multiple objects in the S3 bucket.
/// In this case we can't use the native copy.
if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
{
/// Use native copy, the more optimal way.
LOG_TRACE(log, "Copying file {} from disk {} to S3 using native copy", src_path, src_disk->getName());
copyS3File(
client,
/* src_bucket */ blob_path[1],
/* src_key= */ blob_path[0],
start_pos,
length,
s3_uri.bucket,
fs::path(s3_uri.key) / path_in_backup,
request_settings,
{},
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
return; /// copied!
}
}
/// Fallback to copy through buffers.
BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
}
void BackupWriterS3::copyDataToFile(
const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name)
void BackupWriterS3::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
{
copyDataToS3File(create_read_buffer, offset, size, client, s3_uri.bucket, fs::path(s3_uri.key) / dest_file_name, request_settings, {},
copyDataToS3File(create_read_buffer, start_pos, length, client, s3_uri.bucket, fs::path(s3_uri.key) / path_in_backup, request_settings, {},
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
}
@ -225,24 +242,11 @@ UInt64 BackupWriterS3::getFileSize(const String & file_name)
return objects[0].GetSize();
}
bool BackupWriterS3::fileContentsEqual(const String & file_name, const String & expected_file_contents)
std::unique_ptr<ReadBuffer> BackupWriterS3::readFile(const String & file_name, size_t expected_file_size)
{
if (listObjects(*client, s3_uri, file_name).empty())
return false;
try
{
auto in = std::make_unique<ReadBufferFromS3>(
client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings);
String actual_file_contents(expected_file_contents.size(), ' ');
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
&& (actual_file_contents == expected_file_contents) && in->eof();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
return false;
}
return std::make_unique<ReadBufferFromS3>(
client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings,
false, 0, 0, false, expected_file_size);
}
std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
@ -251,9 +255,11 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
client,
s3_uri.bucket,
fs::path(s3_uri.key) / file_name,
DBMS_DEFAULT_BUFFER_SIZE,
request_settings,
std::nullopt,
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"),
write_settings);
}
void BackupWriterS3::removeFile(const String & file_name)

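For S3 the native path hinges on two things visible in the hunks above: the source object must be addressable as a single blob (getBlobPath() returning exactly two elements, key and bucket) and the source and destination must be the same kind of storage with matching encryption. A small sketch of that gate, independent of the AWS SDK, might look like the following; S3CopySource and canUseNativeS3Copy are hypothetical names, and a real implementation also passes request settings, a thread-pool runner, and so on to copyS3File().

#include <string>
#include <vector>

// Hypothetical distillation of the checks BackupWriterS3::copyFileFromDisk()
// performs before calling copyS3File().
struct S3CopySource
{
    std::vector<std::string> blob_path;  // expected layout: {key, bucket}
    bool same_kind = false;              // source DataSourceDescription::sameKind(S3 backup destination)
    bool encrypted_matches = false;      // is_encrypted == copy_encrypted
};

inline bool canUseNativeS3Copy(const S3CopySource & src)
{
    // More than two elements means the file is split across several objects,
    // so a single server-side CopyObject can't reproduce it.
    return src.same_kind && src.encrypted_matches && src.blob_path.size() == 2;
}
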
View File

@ -3,8 +3,8 @@
#include "config.h"
#if USE_AWS_S3
#include <Backups/BackupIO.h>
#include <IO/ReadSettings.h>
#include <Backups/BackupIO_Default.h>
#include <Disks/DiskType.h>
#include <IO/S3Common.h>
#include <Storages/StorageS3Settings.h>
#include <Interpreters/Context_fwd.h>
@ -14,7 +14,7 @@ namespace DB
{
/// Represents a backup stored to AWS S3.
class BackupReaderS3 : public IBackupReader
class BackupReaderS3 : public BackupReaderDefault
{
public:
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
@ -23,20 +23,19 @@ public:
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) override;
DataSourceDescription getDataSourceDescription() const override;
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
private:
S3::URI s3_uri;
std::shared_ptr<S3::Client> client;
ReadSettings read_settings;
const S3::URI s3_uri;
const std::shared_ptr<S3::Client> client;
S3Settings::RequestSettings request_settings;
Poco::Logger * log;
const DataSourceDescription data_source_description;
};
class BackupWriterS3 : public IBackupWriter
class BackupWriterS3 : public BackupWriterDefault
{
public:
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
@ -44,42 +43,24 @@ public:
bool fileExists(const String & file_name) override;
UInt64 getFileSize(const String & file_name) override;
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
void copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name) override;
void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) override;
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
void removeFile(const String & file_name) override;
void removeFiles(const Strings & file_names) override;
DataSourceDescription getDataSourceDescription() const override;
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;
private:
void copyObjectImpl(
const String & src_bucket,
const String & src_key,
const String & dst_bucket,
const String & dst_key,
size_t size,
const std::optional<ObjectAttributes> & metadata = std::nullopt) const;
void copyObjectMultipartImpl(
const String & src_bucket,
const String & src_key,
const String & dst_bucket,
const String & dst_key,
size_t size,
const std::optional<ObjectAttributes> & metadata = std::nullopt) const;
std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
void removeFilesBatch(const Strings & file_names);
S3::URI s3_uri;
std::shared_ptr<S3::Client> client;
const S3::URI s3_uri;
const std::shared_ptr<S3::Client> client;
S3Settings::RequestSettings request_settings;
Poco::Logger * log;
std::optional<bool> supports_batch_delete;
const DataSourceDescription data_source_description;
};
}

View File

@ -36,6 +36,7 @@ namespace ErrorCodes
extern const int WRONG_BASE_BACKUP;
extern const int BACKUP_ENTRY_NOT_FOUND;
extern const int BACKUP_IS_EMPTY;
extern const int CANNOT_RESTORE_TO_NONENCRYPTED_DISK;
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
extern const int LOGICAL_ERROR;
}
@ -339,6 +340,8 @@ void BackupImpl::writeBackupMetadata()
}
if (!info.data_file_name.empty() && (info.data_file_name != info.file_name))
*out << "<data_file>" << xml << info.data_file_name << "</data_file>";
if (info.encrypted_by_disk)
*out << "<encrypted_by_disk>true</encrypted_by_disk>";
}
total_size += info.size;
@ -444,6 +447,7 @@ void BackupImpl::readBackupMetadata()
{
info.data_file_name = getString(file_config, "data_file", info.file_name);
}
info.encrypted_by_disk = getBool(file_config, "encrypted_by_disk", false);
}
file_names.emplace(info.file_name, std::pair{info.size, info.checksum});
@ -633,6 +637,11 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const String & file_nam
}
std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum & size_and_checksum) const
{
return readFileImpl(size_and_checksum, /* read_encrypted= */ false);
}
std::unique_ptr<SeekableReadBuffer> BackupImpl::readFileImpl(const SizeAndChecksum & size_and_checksum, bool read_encrypted) const
{
if (open_mode != OpenMode::READ)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for reading");
@ -660,6 +669,14 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum &
info = it->second;
}
if (info.encrypted_by_disk != read_encrypted)
{
throw Exception(
ErrorCodes::CANNOT_RESTORE_TO_NONENCRYPTED_DISK,
"File {} is encrypted in the backup, it can be restored only to an encrypted disk",
info.data_file_name);
}
std::unique_ptr<SeekableReadBuffer> read_buffer;
std::unique_ptr<SeekableReadBuffer> base_read_buffer;
@ -720,14 +737,14 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum &
}
}
size_t BackupImpl::copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) const
size_t BackupImpl::copyFileToDisk(const String & file_name,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const
{
return copyFileToDisk(getFileSizeAndChecksum(file_name), destination_disk, destination_path, write_mode, write_settings);
return copyFileToDisk(getFileSizeAndChecksum(file_name), destination_disk, destination_path, write_mode);
}
size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) const
size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const
{
if (open_mode != OpenMode::READ)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for reading");
@ -760,19 +777,26 @@ size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, Dis
info = it->second;
}
if (info.encrypted_by_disk && !destination_disk->getDataSourceDescription().is_encrypted)
{
throw Exception(
ErrorCodes::CANNOT_RESTORE_TO_NONENCRYPTED_DISK,
"File {} is encrypted in the backup, it can be restored only to an encrypted disk",
info.data_file_name);
}
bool file_copied = false;
if (info.size && !info.base_size && !use_archive)
{
/// Data comes completely from this backup.
reader->copyFileToDisk(info.data_file_name, info.size, destination_disk, destination_path, write_mode, write_settings);
reader->copyFileToDisk(info.data_file_name, info.size, info.encrypted_by_disk, destination_disk, destination_path, write_mode);
file_copied = true;
}
else if (info.size && (info.size == info.base_size))
{
/// Data comes completely from the base backup (nothing comes from this backup).
base_backup->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode, write_settings);
base_backup->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode);
file_copied = true;
}
@ -786,9 +810,13 @@ size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, Dis
else
{
/// Use the generic way to copy data. `readFile()` will update `num_read_files`.
auto read_buffer = readFile(size_and_checksum);
auto write_buffer = destination_disk->writeFile(destination_path, std::min<size_t>(info.size, DBMS_DEFAULT_BUFFER_SIZE),
write_mode, write_settings);
auto read_buffer = readFileImpl(size_and_checksum, /* read_encrypted= */ info.encrypted_by_disk);
std::unique_ptr<WriteBuffer> write_buffer;
size_t buf_size = std::min<size_t>(info.size, reader->getWriteBufferSize());
if (info.encrypted_by_disk)
write_buffer = destination_disk->writeEncryptedFile(destination_path, buf_size, write_mode, reader->getWriteSettings());
else
write_buffer = destination_disk->writeFile(destination_path, buf_size, write_mode, reader->getWriteSettings());
copyData(*read_buffer, *write_buffer, info.size);
write_buffer->finalize();
}
@ -805,72 +833,57 @@ void BackupImpl::writeFile(const BackupFileInfo & info, BackupEntryPtr entry)
if (writing_finalized)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is already finalized");
std::string from_file_name = "memory buffer";
if (auto fname = entry->getFilePath(); !fname.empty())
from_file_name = "file " + fname;
bool should_check_lock_file = false;
{
std::lock_guard lock{mutex};
++num_files;
total_size += info.size;
if (!num_entries)
should_check_lock_file = true;
}
auto src_disk = entry->getDisk();
auto src_file_path = entry->getFilePath();
bool from_immutable_file = entry->isFromImmutableFile();
String src_file_desc = src_file_path.empty() ? "memory buffer" : ("file " + src_file_path);
if (info.data_file_name.empty())
{
LOG_TRACE(log, "Writing backup for file {} from {}: skipped, {}", info.data_file_name, from_file_name, !info.size ? "empty" : "base backup has it");
LOG_TRACE(log, "Writing backup for file {} from {}: skipped, {}", info.data_file_name, src_file_desc, !info.size ? "empty" : "base backup has it");
return;
}
if (!coordination->startWritingFile(info.data_file_index))
{
LOG_TRACE(log, "Writing backup for file {} from {}: skipped, data file #{} is already being written", info.data_file_name, from_file_name, info.data_file_index);
LOG_TRACE(log, "Writing backup for file {} from {}: skipped, data file #{} is already being written", info.data_file_name, src_file_desc, info.data_file_index);
return;
}
LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, from_file_name, info.data_file_index);
if (!should_check_lock_file)
checkLockFile(true);
auto writer_description = writer->getDataSourceDescription();
auto reader_description = entry->getDataSourceDescription();
/// NOTE: `mutex` must be unlocked during copying otherwise writing will be in one thread maximum and hence slow.
/// We need to copy the whole file without an archive; we can do it faster
/// if the source and destination are compatible
if (!use_archive && writer->supportNativeCopy(reader_description))
if (use_archive)
{
/// Should be much faster than writing data through server.
LOG_TRACE(log, "Will copy file {} using native copy", info.data_file_name);
/// NOTE: `mutex` must be unlocked here otherwise writing will be in one thread maximum and hence slow.
writer->copyFileNative(entry->tryGetDiskIfExists(), entry->getFilePath(), info.base_size, info.size - info.base_size, info.data_file_name);
LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}, adding to archive", info.data_file_name, src_file_desc, info.data_file_index);
auto out = archive_writer->writeFile(info.data_file_name);
auto read_buffer = entry->getReadBuffer(writer->getReadSettings());
if (info.base_size != 0)
read_buffer->seek(info.base_size, SEEK_SET);
copyData(*read_buffer, *out);
out->finalize();
}
else if (src_disk && from_immutable_file)
{
LOG_TRACE(log, "Writing backup for file {} from {} (disk {}): data file #{}", info.data_file_name, src_file_desc, src_disk->getName(), info.data_file_index);
writer->copyFileFromDisk(info.data_file_name, src_disk, src_file_path, info.encrypted_by_disk, info.base_size, info.size - info.base_size);
}
else
{
bool has_entries = false;
{
std::lock_guard lock{mutex};
has_entries = num_entries > 0;
}
if (!has_entries)
checkLockFile(true);
if (use_archive)
{
LOG_TRACE(log, "Adding file {} to archive", info.data_file_name);
auto out = archive_writer->writeFile(info.data_file_name);
auto read_buffer = entry->getReadBuffer();
if (info.base_size != 0)
read_buffer->seek(info.base_size, SEEK_SET);
copyData(*read_buffer, *out);
out->finalize();
}
else
{
LOG_TRACE(log, "Will copy file {}", info.data_file_name);
auto create_read_buffer = [entry] { return entry->getReadBuffer(); };
/// NOTE: `mutex` must be unlocked here otherwise writing will be in one thread maximum and hence slow.
writer->copyDataToFile(create_read_buffer, info.base_size, info.size - info.base_size, info.data_file_name);
}
LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, src_file_desc, info.data_file_index);
auto create_read_buffer = [entry, read_settings = writer->getReadSettings()] { return entry->getReadBuffer(read_settings); };
writer->copyDataToFile(info.data_file_name, create_read_buffer, info.base_size, info.size - info.base_size);
}
{

View File

@ -76,10 +76,8 @@ public:
SizeAndChecksum getFileSizeAndChecksum(const String & file_name) const override;
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) const override;
std::unique_ptr<SeekableReadBuffer> readFile(const SizeAndChecksum & size_and_checksum) const override;
size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) const override;
size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode, const WriteSettings & write_settings) const override;
size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const override;
size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const override;
void writeFile(const BackupFileInfo & info, BackupEntryPtr entry) override;
void finalizeWriting() override;
bool supportsWritingInMultipleThreads() const override { return !use_archive; }
@ -109,6 +107,8 @@ private:
/// Calculates and sets `compressed_size`.
void setCompressedSize();
std::unique_ptr<SeekableReadBuffer> readFileImpl(const SizeAndChecksum & size_and_checksum, bool read_encrypted) const;
const String backup_name_for_logging;
const bool use_archive;
const ArchiveParams archive_params;

View File

@ -23,6 +23,7 @@ namespace ErrorCodes
M(String, password) \
M(Bool, structure_only) \
M(Bool, async) \
M(Bool, decrypt_files_from_encrypted_disks) \
M(Bool, deduplicate_files) \
M(UInt64, shard_num) \
M(UInt64, replica_num) \

View File

@ -32,6 +32,9 @@ struct BackupSettings
/// Whether the BACKUP command must return immediately without waiting until the backup has completed.
bool async = false;
/// Whether the BACKUP command should decrypt files stored on encrypted disks.
bool decrypt_files_from_encrypted_disks = false;
/// Whether the BACKUP will omit similar files (within one backup only).
bool deduplicate_files = true;

View File

@ -368,6 +368,7 @@ void BackupsWorker::doBackup(
/// Wait until all the hosts have written their backup entries.
backup_coordination->waitForStage(Stage::COMPLETED);
backup_coordination->setStage(Stage::COMPLETED,"");
}
else
{
@ -385,7 +386,7 @@ void BackupsWorker::doBackup(
writeBackupEntries(backup, std::move(backup_entries), backup_id, backup_coordination, backup_settings.internal);
/// We have written our backup entries, we need to tell other hosts (they could be waiting for it).
backup_coordination->setStage(Stage::COMPLETED, "");
backup_coordination->setStage(Stage::COMPLETED,"");
}
size_t num_files = 0;
@ -654,12 +655,26 @@ void BackupsWorker::doRestore(
/// (If this isn't ON CLUSTER query RestorerFromBackup will check access rights later.)
ClusterPtr cluster;
bool on_cluster = !restore_query->cluster.empty();
if (on_cluster)
{
restore_query->cluster = context->getMacros()->expand(restore_query->cluster);
cluster = context->getCluster(restore_query->cluster);
restore_settings.cluster_host_ids = cluster->getHostIDs();
}
/// Make a restore coordination.
if (!restore_coordination)
restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster);
if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores)))
throw Exception(
ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED,
"Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");
if (on_cluster)
{
/// We cannot just use access checking provided by the function executeDDLQueryOnCluster(): it would be incorrect
/// because different replicas can contain different set of tables and so the required access rights can differ too.
/// So the right way is pass through the entire cluster and check access for each host.
@ -676,15 +691,6 @@ void BackupsWorker::doRestore(
}
}
/// Make a restore coordination.
if (!restore_coordination)
restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster);
if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores)))
throw Exception(
ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED,
"Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");
/// Do RESTORE.
if (on_cluster)
{
@ -703,6 +709,7 @@ void BackupsWorker::doRestore(
/// Wait until all the hosts have written their backup entries.
restore_coordination->waitForStage(Stage::COMPLETED);
restore_coordination->setStage(Stage::COMPLETED,"");
}
else
{

View File

@ -109,10 +109,10 @@ public:
/// Copies a file from the backup to a specified destination disk. Returns the number of bytes written.
virtual size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode = WriteMode::Rewrite, const WriteSettings & write_settings = {}) const = 0;
WriteMode write_mode = WriteMode::Rewrite) const = 0;
virtual size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
WriteMode write_mode = WriteMode::Rewrite, const WriteSettings & write_settings = {}) const = 0;
WriteMode write_mode = WriteMode::Rewrite) const = 0;
/// Puts a new entry to the backup.
virtual void writeFile(const BackupFileInfo & file_info, BackupEntryPtr entry) = 0;

View File

@ -17,23 +17,16 @@ class IBackupEntriesLazyBatch::BackupEntryFromBatch : public IBackupEntry
public:
BackupEntryFromBatch(const std::shared_ptr<IBackupEntriesLazyBatch> & batch_, size_t index_) : batch(batch_), index(index_) { }
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override { return getInternalBackupEntry()->getReadBuffer(read_settings); }
UInt64 getSize() const override { return getInternalBackupEntry()->getSize(); }
std::optional<UInt128> getChecksum() const override { return getInternalBackupEntry()->getChecksum(); }
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override { return getInternalBackupEntry()->getReadBuffer(); }
String getFilePath() const override
{
return getInternalBackupEntry()->getFilePath();
}
DiskPtr tryGetDiskIfExists() const override
{
return getInternalBackupEntry()->tryGetDiskIfExists();
}
DataSourceDescription getDataSourceDescription() const override
{
return getInternalBackupEntry()->getDataSourceDescription();
}
UInt128 getChecksum() const override { return getInternalBackupEntry()->getChecksum(); }
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override { return getInternalBackupEntry()->getPartialChecksum(prefix_length); }
DataSourceDescription getDataSourceDescription() const override { return getInternalBackupEntry()->getDataSourceDescription(); }
bool isEncryptedByDisk() const override { return getInternalBackupEntry()->isEncryptedByDisk(); }
bool isFromFile() const override { return getInternalBackupEntry()->isFromFile(); }
bool isFromImmutableFile() const override { return getInternalBackupEntry()->isFromImmutableFile(); }
String getFilePath() const override { return getInternalBackupEntry()->getFilePath(); }
DiskPtr getDisk() const override { return getInternalBackupEntry()->getDisk(); }
private:
BackupEntryPtr getInternalBackupEntry() const

View File

@ -20,16 +20,24 @@ public:
/// Returns the size of the data.
virtual UInt64 getSize() const = 0;
/// Returns the checksum of the data if it's precalculated.
/// Can return nullopt which means the checksum should be calculated from the read buffer.
virtual std::optional<UInt128> getChecksum() const { return {}; }
/// Returns the checksum of the data.
virtual UInt128 getChecksum() const = 0;
/// Returns a partial checksum, i.e. the checksum calculated for a prefix part of the data.
/// Can return nullopt if the partial checksum is too difficult to calculate.
virtual std::optional<UInt128> getPartialChecksum(size_t /* prefix_length */) const { return {}; }
/// Returns a read buffer for reading the data.
virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer() const = 0;
virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const = 0;
virtual String getFilePath() const = 0;
/// Returns true if the data returned by getReadBuffer() is encrypted by an encrypted disk.
virtual bool isEncryptedByDisk() const { return false; }
virtual DiskPtr tryGetDiskIfExists() const = 0;
/// Returns information about disk and file if this backup entry is generated from a file.
virtual bool isFromFile() const { return false; }
virtual bool isFromImmutableFile() const { return false; }
virtual String getFilePath() const { return ""; }
virtual DiskPtr getDisk() const { return nullptr; }
virtual DataSourceDescription getDataSourceDescription() const = 0;
};

View File

@ -93,7 +93,10 @@ void RestoreCoordinationRemote::createRootNodes()
void RestoreCoordinationRemote::setStage(const String & new_stage, const String & message)
{
stage_sync->set(current_host, new_stage, message);
if (is_internal)
stage_sync->set(current_host, new_stage, message);
else
stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true);
}
void RestoreCoordinationRemote::setError(const Exception & exception)
@ -283,8 +286,8 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic<size_t>
String status;
if (zk->tryGet(root_zookeeper_path + "/" + existing_restore_path + "/stage", status))
{
/// If status is not COMPLETED it could be because the restore failed, check if 'error' exists
if (status != Stage::COMPLETED && !zk->exists(root_zookeeper_path + "/" + existing_restore_path + "/error"))
/// Check if some other restore is in progress
if (status == Stage::SCHEDULED_TO_START)
{
LOG_WARNING(log, "Found a concurrent restore: {}, current restore: {}", existing_restore_uuid, toString(restore_uuid));
result = true;

View File

@ -169,9 +169,9 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
{
std::shared_ptr<IBackupReader> reader;
if (engine_name == "File")
reader = std::make_shared<BackupReaderFile>(path);
reader = std::make_shared<BackupReaderFile>(path, params.context);
else
reader = std::make_shared<BackupReaderDisk>(disk, path);
reader = std::make_shared<BackupReaderDisk>(disk, path, params.context);
return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
}
else

View File

@ -528,7 +528,7 @@ target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::fast_float)
if (USE_ORC)
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} "${CMAKE_BINARY_DIR}/contrib/orc/c++/include")
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} "${PROJECT_BINARY_DIR}/contrib/orc/c++/include")
endif ()
if (TARGET ch_contrib::rocksdb)

View File

@ -4,6 +4,8 @@
namespace ProfileEvents
{
extern const Event DistributedConnectionTries;
extern const Event DistributedConnectionUsable;
extern const Event DistributedConnectionMissingTable;
extern const Event DistributedConnectionStaleReplica;
}
@ -35,6 +37,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
SCOPE_EXIT(is_finished = true);
try
{
ProfileEvents::increment(ProfileEvents::DistributedConnectionTries);
result.entry = pool->get(*timeouts, settings, /* force_connected = */ false);
AsyncCallbackSetter async_setter(&*result.entry, std::move(async_callback));
@ -45,6 +48,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
if (!table_to_check || server_revision < DBMS_MIN_REVISION_WITH_TABLES_STATUS)
{
result.entry->forceConnected(*timeouts);
ProfileEvents::increment(ProfileEvents::DistributedConnectionUsable);
result.is_usable = true;
result.is_up_to_date = true;
return;
@ -65,6 +69,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
return;
}
ProfileEvents::increment(ProfileEvents::DistributedConnectionUsable);
result.is_usable = true;
UInt64 max_allowed_delay = settings ? UInt64(settings->max_replica_delay_for_distributed_queries) : 0;

View File

@ -135,7 +135,6 @@ private:
Protocol::Compression compression; /// Whether to compress data when interacting with the server.
Protocol::Secure secure; /// Whether to encrypt data when interacting with the server.
Int64 priority; /// priority from <remote_servers>
};
/**
@ -192,6 +191,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP
{
return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port
&& lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password
&& lhs.quota_key == rhs.quota_key
&& lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name
&& lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority;
}

View File

@ -73,9 +73,9 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
Int64 ConnectionPoolWithFailover::getPriority() const
{
return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto &a, const auto &b)
return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
{
return a->getPriority() - b->getPriority();
return a->getPriority() < b->getPriority();
}))->getPriority();
}
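The comparator rewrite above matters because std::max_element expects a strict-weak-ordering predicate returning bool: the old subtraction-based lambda converts any non-zero difference to true, so the ordering it implies is inconsistent (and the subtraction itself can overflow). A standalone sketch of the difference, using made-up priority values rather than real pools:
#include <algorithm>
#include <cstdint>
#include <vector>
int main()
{
    std::vector<int64_t> priorities{3, 1, 2};
    // Broken: any non-zero (a - b) converts to true, so this is not a strict weak ordering.
    auto broken = *std::max_element(priorities.begin(), priorities.end(),
                                    [](int64_t a, int64_t b) { return a - b; });
    // Fixed: a proper less-than comparison, as in the new code.
    auto fixed = *std::max_element(priorities.begin(), priorities.end(),
                                   [](int64_t a, int64_t b) { return a < b; });
    (void)broken; // result is unspecified with the broken predicate
    (void)fixed;  // 3
}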

View File

@ -579,6 +579,7 @@
M(694, ASYNC_LOAD_CYCLE) \
M(695, ASYNC_LOAD_FAILED) \
M(696, ASYNC_LOAD_CANCELED) \
M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

View File

@ -10,6 +10,10 @@
* Instead of this class, you could just use the pair (version, key) in the HashSet as the key
* but then the table would accumulate all the keys that it ever stored and would grow unreasonably.
* This class goes a step further and considers the keys with the old version empty in the hash table.
*
* Zero values note:
* A cell in ClearableHashSet can store a zero value as a normal value.
* If its version is equal to the version of the set itself, it is not considered empty even if the key's value is the zero value of the corresponding type.
*/
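To make the versioning idea above concrete, here is a minimal standalone sketch (an illustration with made-up names, not the actual ClearableHashSet code): a cell is treated as empty whenever its version lags behind the set's version, so clear() is O(1) and zero keys can be stored like any other value.
#include <cstdint>
struct VersionedCell
{
    uint64_t key = 0;    // a zero key is allowed; emptiness is decided by the version
    uint32_t version = 0;
};
struct MiniClearableSet
{
    uint32_t version = 1;
    bool isEmpty(const VersionedCell & cell) const { return cell.version != version; }
    void insert(VersionedCell & cell, uint64_t key) const
    {
        cell.key = key;
        cell.version = version;  // stamping with the current version makes the cell non-empty
    }
    void clear() { ++version; }  // all previously stamped cells become logically empty at once
};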
@ -48,30 +52,6 @@ struct ClearableHashTableCell : public BaseCell
ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
};
using StringRefBaseCell = HashSetCellWithSavedHash<StringRef, DefaultHash<StringRef>, ClearableHashSetState>;
/// specialization for StringRef to allow zero size key (empty string)
template <>
struct ClearableHashTableCell<StringRef, StringRefBaseCell> : public StringRefBaseCell
{
using State = ClearableHashSetState;
using value_type = typename StringRefBaseCell::value_type;
UInt32 version;
bool isZero(const State & state) const { return version != state.version; }
static bool isZero(const StringRef & key_, const State & state_) { return StringRefBaseCell::isZero(key_, state_); }
/// Set the key value to zero.
void setZero() { version = 0; }
/// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
static constexpr bool need_zero_value_storage = true;
ClearableHashTableCell() { } /// NOLINT
ClearableHashTableCell(const StringRef & key_, const State & state) : StringRefBaseCell(key_, state), version(state.version) { }
};
template <
typename Key,
typename Hash = DefaultHash<Key>,
@ -90,13 +70,6 @@ public:
{
++this->version;
this->m_size = 0;
if constexpr (Cell::need_zero_value_storage)
{
/// clear ZeroValueStorage
if (this->hasZero())
this->clearHasZero();
}
}
};
@ -119,13 +92,6 @@ public:
{
++this->version;
this->m_size = 0;
if constexpr (Cell::need_zero_value_storage)
{
/// clear ZeroValueStorage
if (this->hasZero())
this->clearHasZero();
}
}
};

View File

@ -358,7 +358,7 @@ public:
std::pair<LookupResult, bool> res;
emplace(Cell::getKey(x), res.first, res.second);
if (res.second)
insertSetMapped(res.first->getMapped(), x);
res.first->setMapped(x);
return res;
}

View File

@ -9,6 +9,8 @@
/** NOTE HashMap could only be used for memmoveable (position independent) types.
* Example: std::string is not position independent in libstdc++ with C++11 ABI or in libc++.
* Also, key in hash table must be of type, that zero bytes is compared equals to zero key.
*
* Please keep in sync with PackedHashMap.h
*/
namespace DB
@ -53,13 +55,13 @@ PairNoInit<std::decay_t<First>, std::decay_t<Second>> makePairNoInit(First && fi
}
template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState>
template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState, typename Pair = PairNoInit<Key, TMapped>>
struct HashMapCell
{
using Mapped = TMapped;
using State = TState;
using value_type = PairNoInit<Key, Mapped>;
using value_type = Pair;
using mapped_type = Mapped;
using key_type = Key;
@ -151,14 +153,14 @@ struct HashMapCell
namespace std
{
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_size<HashMapCell<Key, TMapped, Hash, TState>> : std::integral_constant<size_t, 2> { };
template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
struct tuple_size<HashMapCell<Key, TMapped, Hash, TState, Pair>> : std::integral_constant<size_t, 2> { };
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_element<0, HashMapCell<Key, TMapped, Hash, TState>> { using type = Key; };
template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
struct tuple_element<0, HashMapCell<Key, TMapped, Hash, TState, Pair>> { using type = Key; };
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_element<1, HashMapCell<Key, TMapped, Hash, TState>> { using type = TMapped; };
template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
struct tuple_element<1, HashMapCell<Key, TMapped, Hash, TState, Pair>> { using type = TMapped; };
}
template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState>
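The tuple_size / tuple_element specializations above (now carrying the extra Pair parameter) exist so that structured bindings keep working over cells. A generic, self-contained illustration of the mechanism with a made-up MiniCell type; the real HashMapCell / PackedHashMapCell provide an analogous member get<I>():
#include <cstddef>
#include <tuple>
struct MiniCell
{
    int key;
    double mapped;
    template <size_t I>
    auto get() const
    {
        if constexpr (I == 0) return key;
        else return mapped;
    }
};
namespace std
{
    template <> struct tuple_size<MiniCell> : std::integral_constant<size_t, 2> {};
    template <> struct tuple_element<0, MiniCell> { using type = int; };
    template <> struct tuple_element<1, MiniCell> { using type = double; };
}
int main()
{
    MiniCell cell{42, 3.14};
    auto [key, mapped] = cell;  // relies on the specializations plus the member get<I>()
    (void)key; (void)mapped;
}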

View File

@ -41,6 +41,8 @@ public:
using Base = HashTable<Key, TCell, Hash, Grower, Allocator>;
using typename Base::LookupResult;
using Base::Base;
void merge(const Self & rhs)
{
if (!this->hasZero() && rhs.hasZero())

View File

@ -117,7 +117,7 @@ inline bool bitEquals(T && a, T && b)
* 3) Hash tables that store the key and do not have a "mapped" value, e.g. the normal HashTable.
* GetKey returns the key, and GetMapped returns a zero void pointer. This simplifies generic
* code that works with mapped values: it can overload on the return type of GetMapped(), and
* doesn't need other parameters. One example is insertSetMapped() function.
* doesn't need other parameters. One example is Cell::setMapped() function.
*
* 4) Hash tables that store both the key and the "mapped" value, e.g. HashMap. Both GetKey and
* GetMapped are supported.
@ -216,17 +216,6 @@ struct HashTableCell
};
/**
* A helper function for HashTable::insert() to set the "mapped" value.
* Overloaded on the mapped type, does nothing if it's VoidMapped.
*/
template <typename ValueType>
void insertSetMapped(VoidMapped /* dest */, const ValueType & /* src */) {}
template <typename MappedType, typename ValueType>
void insertSetMapped(MappedType & dest, const ValueType & src) { dest = src.second; }
/** Determines the size of the hash table, and when and how much it should be resized.
* Has very small state (one UInt8) and useful for Set-s allocated in automatic memory (see uniqExact as an example).
*/
@ -241,6 +230,8 @@ struct HashTableGrower
/// If collision resolution chains are contiguous, we can implement erase operation by moving the elements.
static constexpr auto performs_linear_probing_with_single_step = true;
static constexpr size_t max_size_degree = 23;
/// The size of the hash table in the cells.
size_t bufSize() const { return 1ULL << size_degree; }
@ -259,17 +250,18 @@ struct HashTableGrower
/// Increase the size of the hash table.
void increaseSize()
{
size_degree += size_degree >= 23 ? 1 : 2;
size_degree += size_degree >= max_size_degree ? 1 : 2;
}
/// Set the buffer size by the number of elements in the hash table. Used when deserializing a hash table.
void set(size_t num_elems)
{
size_degree = num_elems <= 1
? initial_size_degree
: ((initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
? initial_size_degree
: (static_cast<size_t>(log2(num_elems - 1)) + 2));
if (num_elems <= 1)
size_degree = initial_size_degree;
else if (initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
size_degree = initial_size_degree;
else
size_degree = static_cast<size_t>(log2(num_elems - 1)) + 2;
}
void setBufSize(size_t buf_size_)
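The rewritten set() above is easier to follow as explicit branches; as a quick sanity check, here is the same sizing rule as a free function (assuming the default initial_size_degree of 8) together with a worked value:
#include <cmath>
#include <cstddef>
// Same rule as HashTableGrower::set(): pick a power-of-two buffer big enough
// for num_elems at roughly 0.5 load factor, but never below the initial degree.
size_t sizeDegreeFor(size_t num_elems, size_t initial_size_degree = 8)
{
    if (num_elems <= 1)
        return initial_size_degree;
    size_t needed = static_cast<size_t>(log2(num_elems - 1)) + 2;
    return needed < initial_size_degree ? initial_size_degree : needed;
}
// Example: num_elems = 1000 -> log2(999) ~ 9.96 -> degree 11 -> 2048 cells (>= 2 * 1000).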
@ -281,6 +273,7 @@ struct HashTableGrower
/** Determines the size of the hash table, and when and how much it should be resized.
* This structure is aligned to cache line boundary and also occupies it all.
* Precalculates some values to speed up lookups and insertion into the HashTable (and thus has bigger memory footprint than HashTableGrower).
* This grower assumes a 0.5 load factor
*/
template <size_t initial_size_degree = 8>
class alignas(64) HashTableGrowerWithPrecalculation
@ -290,6 +283,7 @@ class alignas(64) HashTableGrowerWithPrecalculation
UInt8 size_degree = initial_size_degree;
size_t precalculated_mask = (1ULL << initial_size_degree) - 1;
size_t precalculated_max_fill = 1ULL << (initial_size_degree - 1);
static constexpr size_t max_size_degree = 23;
public:
UInt8 sizeDegree() const { return size_degree; }
@ -319,16 +313,17 @@ public:
bool overflow(size_t elems) const { return elems > precalculated_max_fill; }
/// Increase the size of the hash table.
void increaseSize() { increaseSizeDegree(size_degree >= 23 ? 1 : 2); }
void increaseSize() { increaseSizeDegree(size_degree >= max_size_degree ? 1 : 2); }
/// Set the buffer size by the number of elements in the hash table. Used when deserializing a hash table.
void set(size_t num_elems)
{
size_degree = num_elems <= 1
? initial_size_degree
: ((initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
? initial_size_degree
: (static_cast<size_t>(log2(num_elems - 1)) + 2));
if (num_elems <= 1)
size_degree = initial_size_degree;
else if (initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
size_degree = initial_size_degree;
else
size_degree = static_cast<size_t>(log2(num_elems - 1)) + 2;
increaseSizeDegree(0);
}
@ -753,6 +748,7 @@ protected:
public:
using key_type = Key;
using grower_type = Grower;
using mapped_type = typename Cell::mapped_type;
using value_type = typename Cell::value_type;
using cell_type = Cell;
@ -770,6 +766,14 @@ public:
alloc(grower);
}
explicit HashTable(const Grower & grower_)
: grower(grower_)
{
if (Cell::need_zero_value_storage)
this->zeroValue()->setZero();
alloc(grower);
}
HashTable(size_t reserve_for_num_elements) /// NOLINT
{
if (Cell::need_zero_value_storage)
@ -1037,7 +1041,7 @@ public:
}
if (res.second)
insertSetMapped(res.first->getMapped(), x);
res.first->setMapped(x);
return res;
}

View File

@ -88,8 +88,12 @@ inline StringRef & ALWAYS_INLINE keyHolderGetKey(DB::ArenaKeyHolder & holder)
inline void ALWAYS_INLINE keyHolderPersistKey(DB::ArenaKeyHolder & holder)
{
// Hash table shouldn't ask us to persist a zero key
assert(holder.key.size > 0);
// Normally, our hash table shouldn't ask to persist a zero key,
// but it can happen in the case of a clearable hash table (ClearableHashSet, for example).
// The clearable hash table doesn't use zero storage and
// distinguishes empty keys by the cell version, not by the value itself.
// So, when an empty StringRef is inserted into ClearableHashSet we get here with a key of zero size.
// assert(holder.key.size > 0);
holder.key.data = holder.pool.insert(holder.key.data, holder.key.size);
}

View File

@ -0,0 +1,107 @@
#pragma once
/// Packed version of HashMap, please keep in sync with HashMap.h
#include <Common/HashTable/HashMap.h>
/// A pair that does not initialize the elements, if not needed.
///
/// NOTE: makePairNoInit() is omitted for PackedPairNoInit since it is not
/// required for PackedHashMap (see mergeBlockWithPipe() for details)
template <typename First, typename Second>
struct __attribute__((packed)) PackedPairNoInit
{
First first;
Second second;
PackedPairNoInit() {} /// NOLINT
template <typename FirstValue>
PackedPairNoInit(FirstValue && first_, NoInitTag)
: first(std::forward<FirstValue>(first_))
{
}
template <typename FirstValue, typename SecondValue>
PackedPairNoInit(FirstValue && first_, SecondValue && second_)
: first(std::forward<FirstValue>(first_))
, second(std::forward<SecondValue>(second_))
{
}
};
/// The difference with ZeroTraits is that PackedZeroTraits accepts PackedPairNoInit instead of Key.
namespace PackedZeroTraits
{
template <typename First, typename Second, template <typename, typename> class PackedPairNoInit>
bool check(const PackedPairNoInit<First, Second> p) { return p.first == First{}; }
template <typename First, typename Second, template <typename, typename> class PackedPairNoInit>
void set(PackedPairNoInit<First, Second> & p) { p.first = First{}; }
}
/// setZero() should be overridden to pass the pair instead of the key, to avoid
/// "reference binding to misaligned address" errors from UBsan.
template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState>
struct PackedHashMapCell : public HashMapCell<Key, TMapped, Hash, TState, PackedPairNoInit<Key, TMapped>>
{
using Base = HashMapCell<Key, TMapped, Hash, TState, PackedPairNoInit<Key, TMapped>>;
using State = typename Base::State;
using value_type = typename Base::value_type;
using key_type = typename Base::key_type;
using Mapped = typename Base::Mapped;
using Base::Base;
void setZero() { PackedZeroTraits::set(this->value); }
Key getKey() const { return this->value.first; }
static Key getKey(const value_type & value_) { return value_.first; }
Mapped & getMapped() { return this->value.second; }
Mapped getMapped() const { return this->value.second; }
value_type getValue() const { return this->value; }
bool keyEquals(const Key key_) const { return bitEqualsByValue(this->value.first, key_); }
bool keyEquals(const Key key_, size_t /*hash_*/) const { return bitEqualsByValue(this->value.first, key_); }
bool keyEquals(const Key key_, size_t /*hash_*/, const State & /*state*/) const { return bitEqualsByValue(this->value.first, key_); }
bool isZero(const State & state) const { return isZero(this->value.first, state); }
static bool isZero(const Key key, const State & /*state*/) { return ZeroTraits::check(key); }
static inline bool bitEqualsByValue(key_type a, key_type b) { return a == b; }
template <size_t I>
auto get() const
{
if constexpr (I == 0) return this->value.first;
else if constexpr (I == 1) return this->value.second;
}
};
namespace std
{
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_size<PackedHashMapCell<Key, TMapped, Hash, TState>> : std::integral_constant<size_t, 2> { };
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_element<0, PackedHashMapCell<Key, TMapped, Hash, TState>> { using type = Key; };
template <typename Key, typename TMapped, typename Hash, typename TState>
struct tuple_element<1, PackedHashMapCell<Key, TMapped, Hash, TState>> { using type = TMapped; };
}
/// Packed HashMap - HashMap with structure without padding
///
/// Sometimes padding in a structure can be crucial. Consider the following
/// example: <UInt64, UInt16> as <Key, Value>. In this case the padding overhead
/// is 0.375 (6 of 16 bytes), which can be significant when there are many keys.
///
/// Note, there is no need to provide PackedHashSet, since it cannot have padding.
template <
typename Key,
typename Mapped,
typename Hash = DefaultHash<Key>,
typename Grower = HashTableGrower<>,
typename Allocator = HashTableAllocator>
using PackedHashMap = HashMapTable<Key, PackedHashMapCell<Key, Mapped, Hash, HashTableNoState>, Hash, Grower, Allocator>;
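A small standalone check of the padding claim above (plain structs standing in for PairNoInit / PackedPairNoInit): with <UInt64, UInt16> the unpacked pair is padded to 16 bytes while the packed one occupies 10, which is where the 0.375 overhead figure comes from.
#include <cstdint>
struct PaddedPair
{
    uint64_t first;
    uint16_t second;
};                                        // 8 + 2 bytes of data, padded to 16
struct __attribute__((packed)) PackedPair
{
    uint64_t first;
    uint16_t second;
};                                        // exactly 10 bytes
static_assert(sizeof(PaddedPair) == 16, "padded layout");
static_assert(sizeof(PackedPair) == 10, "packed layout");
// Padding overhead of the unpacked layout: (16 - 10) / 16 = 0.375.
int main() {}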

View File

@ -224,7 +224,7 @@ public:
emplace(Cell::getKey(x), res.first, res.second, hash_value);
if (res.second)
insertSetMapped(res.first->getMapped(), x);
res.first->setMapped(x);
return res;
}

View File

@ -63,12 +63,13 @@ const char * analyzeImpl(
bool is_first_call = begin == regexp.data();
int depth = 0;
is_trivial = true;
bool is_prefix = true;
required_substring.clear();
bool has_alternative_on_depth_0 = false;
bool has_case_insensitive_flag = false;
/// Substring with a position.
using Substring = std::pair<std::string, size_t>;
/// Substring with is_prefix.
using Substring = std::pair<std::string, bool>;
using Substrings = std::vector<Substring>;
Substrings trivial_substrings(1);
@ -98,6 +99,9 @@ const char * analyzeImpl(
auto finish_non_trivial_char = [&](bool create_new_substr = true)
{
is_trivial = false;
if (create_new_substr)
is_prefix = false;
if (depth != 0)
return;
@ -106,6 +110,7 @@ const char * analyzeImpl(
if (alter.suffix)
{
alter.literal += last_substring->first;
alter.suffix = false;
}
}
@ -126,16 +131,24 @@ const char * analyzeImpl(
if (alter.prefix)
{
alter.literal = last_substring->first + alter.literal;
alter.prefix = is_prefix;
}
}
if (group_required_string.prefix)
{
last_substring->first += group_required_string.literal;
last_substring->second = is_prefix;
}
else
{
finish_non_trivial_char();
last_substring->first = group_required_string.literal;
last_substring->second = false;
}
is_prefix = is_prefix && group_required_string.prefix && group_required_string.suffix;
/// if we can still append, no need to finish it. e.g. abc(de)fg should capture abcdefg
if (!last_substring->first.empty() && !group_required_string.suffix)
{
@ -185,7 +198,6 @@ const char * analyzeImpl(
goto ordinary;
default:
/// all other escape sequences are not supported
is_trivial = false;
finish_non_trivial_char();
break;
}
@ -196,6 +208,7 @@ const char * analyzeImpl(
case '|':
is_trivial = false;
is_prefix = false;
++pos;
if (depth == 0)
{
@ -205,6 +218,7 @@ const char * analyzeImpl(
break;
case '(':
/// bracket does not break is_prefix. for example abc(d) has a prefix 'abcd'
is_trivial = false;
if (!in_square_braces)
{
@ -258,7 +272,6 @@ const char * analyzeImpl(
case '[':
in_square_braces = true;
++depth;
is_trivial = false;
finish_non_trivial_char();
++pos;
break;
@ -270,7 +283,6 @@ const char * analyzeImpl(
--depth;
if (depth == 0)
in_square_braces = false;
is_trivial = false;
finish_non_trivial_char();
++pos;
break;
@ -284,7 +296,6 @@ const char * analyzeImpl(
break;
case '^': case '$': case '.': case '+':
is_trivial = false;
finish_non_trivial_char();
++pos;
break;
@ -296,7 +307,6 @@ const char * analyzeImpl(
case '?':
[[fallthrough]];
case '*':
is_trivial = false;
if (depth == 0 && !last_substring->first.empty() && !in_square_braces)
{
last_substring->first.resize(last_substring->first.size() - 1);
@ -318,8 +328,9 @@ const char * analyzeImpl(
default:
if (depth == 0 && !in_curly_braces && !in_square_braces)
{
/// record the first position of last string.
if (last_substring->first.empty())
last_substring->second = pos - begin;
last_substring->second = is_prefix;
last_substring->first.push_back(*pos);
}
++pos;
@ -328,10 +339,9 @@ const char * analyzeImpl(
}
finish:
finish_non_trivial_char(false);
if (!is_trivial)
{
finish_non_trivial_char(false);
/// we calculate required substring even though has_alternative_on_depth_0.
/// we will clear the required substring after putting it to alternatives.
if (!has_case_insensitive_flag)
@ -357,7 +367,7 @@ finish:
if (max_length >= MIN_LENGTH_FOR_STRSTR || (!is_first_call && max_length > 0))
{
required_substring.literal = candidate_it->first;
required_substring.prefix = candidate_it->second == 0;
required_substring.prefix = candidate_it->second;
required_substring.suffix = candidate_it + 1 == trivial_substrings.end();
}
}
@ -365,7 +375,8 @@ finish:
else if (!trivial_substrings.empty())
{
required_substring.literal = trivial_substrings.front().first;
required_substring.prefix = trivial_substrings.front().second == 0;
/// trivial string means the whole regex is a simple string literal, so the prefix and suffix should be true.
required_substring.prefix = true;
required_substring.suffix = true;
}

View File

@ -7,7 +7,13 @@
#include <Common/logger_useful.h>
#include <Common/Exception.h>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
namespace ProfileEvents
{
extern const Event ConnectionPoolIsFullMicroseconds;
}
namespace DB
{
@ -144,17 +150,19 @@ public:
return Entry(*items.back());
}
Stopwatch blocked;
if (timeout < 0)
{
LOG_INFO(log, "No free connections in pool. Waiting undefinitelly.");
LOG_INFO(log, "No free connections in pool. Waiting indefinitely.");
available.wait(lock);
}
else
{
auto timeout_ms = std::chrono::microseconds(timeout);
auto timeout_ms = std::chrono::milliseconds(timeout);
LOG_INFO(log, "No free connections in pool. Waiting {} ms.", timeout_ms.count());
available.wait_for(lock, timeout_ms);
}
ProfileEvents::increment(ProfileEvents::ConnectionPoolIsFullMicroseconds, blocked.elapsedMicroseconds());
}
}
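Two things change in this hunk: the wait is now measured (ConnectionPoolIsFullMicroseconds) and the duration type is corrected. The old code wrapped a millisecond value in std::chrono::microseconds, cutting the wait 1000x short; a minimal illustration with an assumed timeout value:
#include <chrono>
int main()
{
    long timeout = 200; // intended meaning: 200 milliseconds
    auto wrong = std::chrono::microseconds(timeout);  // 200 us: waits 1000x shorter than intended
    auto right = std::chrono::milliseconds(timeout);  // 200 ms, as the new code does
    (void)wrong; (void)right;
}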

View File

@ -101,7 +101,7 @@ public:
struct ShuffledPool
{
NestedPool * pool{};
const PoolState * state{};
const PoolState * state{}; // WARNING: valid only during initial ordering, dangling
size_t index = 0;
size_t error_count = 0;
size_t slowdown_count = 0;
@ -115,7 +115,6 @@ public:
/// this functor. The pools with lower result value will be tried first.
using GetPriorityFunc = std::function<size_t(size_t index)>;
/// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool).
/// The method will throw if it is unable to get min_entries alive connections or
/// if fallback_to_stale_replicas is false and it is unable to get min_entries connections to up-to-date replicas.
@ -175,10 +174,11 @@ PoolWithFailoverBase<TNestedPool>::getShuffledPools(
}
/// Sort the pools into order in which they will be tried (based on respective PoolStates).
/// Note that `error_count` and `slowdown_count` are used for ordering, but set to zero in the resulting ShuffledPool
std::vector<ShuffledPool> shuffled_pools;
shuffled_pools.reserve(nested_pools.size());
for (size_t i = 0; i < nested_pools.size(); ++i)
shuffled_pools.push_back(ShuffledPool{nested_pools[i].get(), &pool_states[i], i, 0});
shuffled_pools.push_back(ShuffledPool{nested_pools[i].get(), &pool_states[i], i, /* error_count = */ 0, /* slowdown_count = */ 0});
::sort(
shuffled_pools.begin(), shuffled_pools.end(),
[](const ShuffledPool & lhs, const ShuffledPool & rhs)
@ -227,6 +227,10 @@ PoolWithFailoverBase<TNestedPool>::getMany(
{
std::vector<ShuffledPool> shuffled_pools = getShuffledPools(max_ignored_errors, get_priority);
/// Limit `max_tries` value by `max_error_cap` to avoid unlimited number of retries
if (max_tries > max_error_cap)
max_tries = max_error_cap;
/// We will try to get a connection from each pool until a connection is produced or max_tries is reached.
std::vector<TryResult> try_results(shuffled_pools.size());
size_t entries_count = 0;
@ -371,7 +375,7 @@ PoolWithFailoverBase<TNestedPool>::updatePoolStates(size_t max_ignored_errors)
/// distributed_replica_max_ignored_errors
for (auto & state : result)
state.error_count = std::max<UInt64>(0, state.error_count - max_ignored_errors);
state.error_count = state.error_count > max_ignored_errors ? state.error_count - max_ignored_errors : 0;
return result;
}

View File

@ -131,6 +131,8 @@
M(ZooKeeperBytesSent, "Number of bytes send over network while communicating with ZooKeeper.") \
M(ZooKeeperBytesReceived, "Number of bytes received over network while communicating with ZooKeeper.") \
\
M(DistributedConnectionTries, "Total count of distributed connection attempts.") \
M(DistributedConnectionUsable, "Total count of successful distributed connections to a usable server (with required table, but maybe stale).") \
M(DistributedConnectionFailTry, "Total count when distributed connection fails with retry.") \
M(DistributedConnectionMissingTable, "Number of times we rejected a replica from a distributed query, because it did not contain a table needed for the query.") \
M(DistributedConnectionStaleReplica, "Number of times we rejected a replica from a distributed query, because some table needed for a query had replication lag higher than the configured threshold.") \
@ -501,6 +503,8 @@ The server successfully detected this situation and will download merged part fr
M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.") \
M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). Measured on the remote server side.") \
\
M(ConnectionPoolIsFullMicroseconds, "Total time spent waiting for a slot in connection pool.") \
\
M(LogTest, "Number of log messages with level Test") \
M(LogTrace, "Number of log messages with level Trace") \
M(LogDebug, "Number of log messages with level Debug") \

View File

@ -61,20 +61,20 @@ UInt64 Throttler::add(size_t amount)
throw Exception::createDeprecated(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);
/// Wait unless there is positive amount of tokens - throttling
Int64 sleep_time = 0;
Int64 sleep_time_ns = 0;
if (max_speed && tokens_value < 0)
{
sleep_time = static_cast<Int64>(-tokens_value / max_speed * NS);
accumulated_sleep += sleep_time;
sleepForNanoseconds(sleep_time);
accumulated_sleep -= sleep_time;
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time / 1000UL);
sleep_time_ns = static_cast<Int64>(-tokens_value / max_speed * NS);
accumulated_sleep += sleep_time_ns;
sleepForNanoseconds(sleep_time_ns);
accumulated_sleep -= sleep_time_ns;
ProfileEvents::increment(ProfileEvents::ThrottlerSleepMicroseconds, sleep_time_ns / 1000UL);
}
if (parent)
sleep_time += parent->add(amount);
sleep_time_ns += parent->add(amount);
return static_cast<UInt64>(sleep_time);
return static_cast<UInt64>(sleep_time_ns);
}
void Throttler::reset()
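The rename from sleep_time to sleep_time_ns only makes the unit explicit; the value is still nanoseconds and is divided by 1000 when fed into the microsecond-based profile event. A worked trace with assumed numbers (not taken from the diff):
#include <cstdint>
int main()
{
    constexpr double NS = 1e9;            // nanoseconds per second, as in Throttler
    double max_speed = 10'000'000;        // assumed limit: 10 MB/s
    double tokens_value = -5'000'000;     // assumed deficit of 5 MB after add()
    auto sleep_time_ns = static_cast<int64_t>(-tokens_value / max_speed * NS);  // 500'000'000 ns
    auto sleep_reported_us = sleep_time_ns / 1000;                              // 500'000 us
    (void)sleep_reported_us;
}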

View File

@ -34,15 +34,15 @@ public:
const std::shared_ptr<Throttler> & parent_ = nullptr);
/// Use `amount` tokens, sleeps if required or throws exception on limit overflow.
/// Returns duration of sleep in microseconds (to distinguish sleeping on different kinds of throttlers for metrics)
/// Returns duration of sleep in nanoseconds (to distinguish sleeping on different kinds of throttlers for metrics)
UInt64 add(size_t amount);
UInt64 add(size_t amount, ProfileEvents::Event event_amount, ProfileEvents::Event event_sleep_us)
{
UInt64 sleep_us = add(amount);
UInt64 sleep_ns = add(amount);
ProfileEvents::increment(event_amount, amount);
ProfileEvents::increment(event_sleep_us, sleep_us);
return sleep_us;
ProfileEvents::increment(event_sleep_us, sleep_ns / 1000UL);
return sleep_ns;
}
/// Not thread safe

View File

@ -466,6 +466,8 @@ public:
/// Useful to check owner of ephemeral node.
virtual int64_t getSessionID() const = 0;
virtual String getConnectedAddress() const = 0;
/// If the method will throw an exception, callbacks won't be called.
///
/// After the method is executed successfully, you must wait for callbacks

View File

@ -39,6 +39,7 @@ public:
bool isExpired() const override { return expired; }
int64_t getSessionID() const override { return 0; }
String getConnectedAddress() const override { return connected_zk_address; }
void create(
@ -126,6 +127,8 @@ private:
zkutil::ZooKeeperArgs args;
String connected_zk_address;
std::mutex push_request_mutex;
std::atomic<bool> expired{false};

View File

@ -111,6 +111,26 @@ void ZooKeeper::init(ZooKeeperArgs args_)
LOG_TRACE(log, "Initialized, hosts: {}", fmt::join(args.hosts, ","));
else
LOG_TRACE(log, "Initialized, hosts: {}, chroot: {}", fmt::join(args.hosts, ","), args.chroot);
String address = impl->getConnectedAddress();
size_t colon_pos = address.find(':');
connected_zk_host = address.substr(0, colon_pos);
connected_zk_port = address.substr(colon_pos + 1);
connected_zk_index = 0;
if (args.hosts.size() > 1)
{
for (size_t i = 0; i < args.hosts.size(); i++)
{
if (args.hosts[i] == address)
{
connected_zk_index = i;
break;
}
}
}
}
else if (args.implementation == "testkeeper")
{

View File

@ -523,6 +523,10 @@ public:
void setServerCompletelyStarted();
String getConnectedZooKeeperHost() const { return connected_zk_host; }
String getConnectedZooKeeperPort() const { return connected_zk_port; }
size_t getConnectedZooKeeperIndex() const { return connected_zk_index; }
private:
void init(ZooKeeperArgs args_);
@ -586,6 +590,10 @@ private:
ZooKeeperArgs args;
String connected_zk_host;
String connected_zk_port;
size_t connected_zk_index;
std::mutex mutex;
Poco::Logger * log = nullptr;

View File

@ -433,6 +433,8 @@ void ZooKeeper::connect(
}
connected = true;
connected_zk_address = node.address.toString();
break;
}
catch (...)
@ -448,6 +450,8 @@ void ZooKeeper::connect(
if (!connected)
{
WriteBufferFromOwnString message;
connected_zk_address = "";
message << "All connection tries failed while connecting to ZooKeeper. nodes: ";
bool first = true;
for (const auto & node : nodes)

View File

@ -125,6 +125,8 @@ public:
/// Useful to check owner of ephemeral node.
int64_t getSessionID() const override { return session_id; }
String getConnectedAddress() const override { return connected_zk_address; }
void executeGenericRequest(
const ZooKeeperRequestPtr & request,
ResponseCallback callback);
@ -201,6 +203,7 @@ public:
private:
ACLs default_acls;
String connected_zk_address;
zkutil::ZooKeeperArgs args;

View File

@ -4,37 +4,40 @@
TEST(OptimizeRE, analyze)
{
auto test_f = [](const std::string & regexp, const std::string & answer, std::vector<std::string> expect_alternatives = {}, bool trival_expected = false)
auto test_f = [](const std::string & regexp, const std::string & required, std::vector<std::string> expect_alternatives = {}, bool trival_expected = false, bool prefix_expected = false)
{
std::string required;
std::string answer;
bool is_trivial;
bool is_prefix;
std::vector<std::string> alternatives;
OptimizedRegularExpression::analyze(regexp, required, is_trivial, is_prefix, alternatives);
OptimizedRegularExpression::analyze(regexp, answer, is_trivial, is_prefix, alternatives);
std::cerr << regexp << std::endl;
EXPECT_EQ(required, answer);
EXPECT_EQ(alternatives, expect_alternatives);
EXPECT_EQ(is_trivial, trival_expected);
EXPECT_EQ(is_prefix, prefix_expected);
};
test_f("abc", "abc", {}, true);
test_f("abc", "abc", {}, true, true);
test_f("c([^k]*)de", "");
test_f("abc(de)fg", "abcdefg");
test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"});
test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"});
test_f("abc(de)fg", "abcdefg", {}, false, true);
test_f("abc(de|xyz)fg", "abc", {"abcdefg", "abcxyzfg"}, false, true);
test_f("abc(de?f|xyz)fg", "abc", {"abcd", "abcxyzfg"}, false, true);
test_f("abc|fgk|xyz", "", {"abc","fgk", "xyz"});
test_f("(abc)", "abc");
test_f("(abc)", "abc", {}, false, true);
test_f("(abc|fgk)", "", {"abc","fgk"});
test_f("(abc|fgk)(e|f|zkh|)", "", {"abc","fgk"});
test_f("abc(abc|fg)xyzz", "xyzz", {"abcabcxyzz","abcfgxyzz"});
test_f("((abc|fg)kkk*)xyzz", "xyzz", {"abckk", "fgkk"});
test_f("abc(*(abc|fg)*)xyzz", "xyzz");
test_f("abc[k]xyzz", "xyzz");
test_f("(abc[k]xyzz)", "xyzz");
test_f("abc((de)fg(hi))jk", "abcdefghijk");
test_f("abc((?:de)fg(?:hi))jk", "abcdefghijk");
test_f("abc((de)fghi+zzz)jk", "abcdefghi");
test_f("abc((de)fg(hi))?jk", "abc");
test_f("abc((de)fghi?zzz)jk", "abcdefgh");
test_f("abc((de)fg(hi))jk", "abcdefghijk", {}, false, true);
test_f("abc((?:de)fg(?:hi))jk", "abcdefghijk", {}, false, true);
test_f("abc((de)fghi+zzz)jk", "abcdefghi", {}, false, true);
test_f("abc((de)fg(hi))?jk", "abc", {}, false, true);
test_f("abc((de)fghi?zzz)jk", "abcdefgh", {}, false, true);
test_f("abc(*cd)jk", "cdjk");
test_f(R"(abc(de|xyz|(\{xx\}))fg)", "abc", {"abcdefg", "abcxyzfg", "abc{xx}fg"});
test_f(R"(abc(de|xyz|(\{xx\}))fg)", "abc", {"abcdefg", "abcxyzfg", "abc{xx}fg"}, false, true);
test_f("abc(abc|fg)?xyzz", "xyzz");
test_f("abc(abc|fg){0,1}xyzz", "xyzz");
test_f("abc(abc|fg)xyzz|bcdd?k|bc(f|g|h?)z", "", {"abcabcxyzz", "abcfgxyzz", "bcd", "bc"});
@ -43,4 +46,5 @@ TEST(OptimizeRE, analyze)
test_f(R"([Bb]ai[Dd]u[Ss]pider(?:-[A-Za-z]{1,30})(?:-[A-Za-z]{1,30}|)|bingbot|\bYeti(?:-[a-z]{1,30}|)|Catchpoint(?: bot|)|[Cc]harlotte|Daumoa(?:-feedfetcher|)|(?:[a-zA-Z]{1,30}-|)Googlebot(?:-[a-zA-Z]{1,30}|))", "", {"pider-", "bingbot", "Yeti-", "Yeti", "Catchpoint bot", "Catchpoint", "harlotte", "Daumoa-feedfetcher", "Daumoa", "-Googlebot", "Googlebot"});
test_f("abc|(:?xx|yy|zz|x?)def", "", {"abc", "def"});
test_f("abc|(:?xx|yy|zz|x?){1,2}def", "", {"abc", "def"});
test_f(R"(\\A(?:(?:[-0-9_a-z]+(?:\\.[-0-9_a-z]+)*)/k8s1)\\z)", "/k8s1");
}

View File

@ -279,7 +279,17 @@ private:
flush();
if (log_file_settings.max_size != 0)
ftruncate(file_buffer->getFD(), initial_file_size + file_buffer->count());
{
int res = -1;
do
{
res = ftruncate(file_buffer->getFD(), initial_file_size + file_buffer->count());
}
while (res < 0 && errno == EINTR);
if (res != 0)
LOG_WARNING(log, "Could not ftruncate file. Error: {}, errno: {}", errnoToString(), errno);
}
if (log_file_settings.compress_logs)
compressed_buffer.reset();
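The new loop retries ftruncate while it fails with EINTR and only logs a real error. For reference, the same idiom as a small reusable helper; this is a sketch with a hypothetical name, not something this diff introduces:
#include <cerrno>
// Retry a syscall-style callable while it reports EINTR (interrupted by a signal).
template <typename Syscall>
int retryOnEintr(Syscall && call)
{
    int res;
    do
    {
        res = call();
    } while (res < 0 && errno == EINTR);
    return res;
}
// Usage sketch: int res = retryOnEintr([&] { return ftruncate(fd, new_size); });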

View File

@ -149,6 +149,7 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_pa
s3_client->client,
s3_client->uri.bucket,
key,
DBMS_DEFAULT_BUFFER_SIZE,
request_settings_1
};
};

View File

@ -23,6 +23,8 @@ namespace DB
M(UInt64, io_thread_pool_queue_size, 10000, "Queue size for IO thread pool.", 0) \
M(UInt64, max_outdated_parts_loading_thread_pool_size, 32, "The maximum number of threads that would be used for loading outdated data parts on startup", 0) \
M(UInt64, outdated_part_loading_thread_pool_queue_size, 10000, "Queue size for parts loading thread pool.", 0) \
M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited.", 0) \
M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited.", 0) \
M(UInt64, max_remote_read_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for read. Zero means unlimited.", 0) \
M(UInt64, max_remote_write_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for write. Zero means unlimited.", 0) \
M(UInt64, max_local_read_bandwidth_for_server, 0, "The maximum speed of local reads in bytes per second. Zero means unlimited.", 0) \

Some files were not shown because too many files have changed in this diff.