Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit 0c3392662c: Merge branch 'master' into url-function-docs
@@ -102,6 +102,17 @@ if (ENABLE_FUZZING)
     set (ENABLE_PROTOBUF 1)
 endif()

+option (ENABLE_WOBOQ_CODEBROWSER "Build for woboq codebrowser" OFF)
+
+if (ENABLE_WOBOQ_CODEBROWSER)
+    set (ENABLE_EMBEDDED_COMPILER 0)
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-poison-system-directories")
+    # woboq codebrowser uses clang tooling, and they could add default system
+    # clang includes, and later clang will warn for those added by itself
+    # includes.
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-poison-system-directories")
+endif()
+
 # Global libraries
 # See:
 # - default_libs.cmake
@@ -259,8 +270,8 @@ endif ()
 option (ENABLE_BUILD_PATH_MAPPING "Enable remapping of file source paths in debug info, predefined preprocessor macros, and __builtin_FILE(). It's used to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ${ENABLE_BUILD_PATH_MAPPING_DEFAULT})

 if (ENABLE_BUILD_PATH_MAPPING)
-    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
-    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
+    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${PROJECT_SOURCE_DIR}=.")
 endif ()

 option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
@@ -557,7 +568,7 @@ if (NATIVE_BUILD_TARGETS
     )
     message (STATUS "Building native targets...")

-    set (NATIVE_BUILD_DIR "${CMAKE_BINARY_DIR}/native")
+    set (NATIVE_BUILD_DIR "${PROJECT_BINARY_DIR}/native")

     execute_process(
         COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
@@ -571,7 +582,7 @@ if (NATIVE_BUILD_TARGETS
             # Avoid overriding .cargo/config.toml with native toolchain.
             "-DENABLE_RUST=OFF"
             "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
-            ${CMAKE_SOURCE_DIR}
+            ${PROJECT_SOURCE_DIR}
         WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
         COMMAND_ECHO STDOUT)

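For orientation, a minimal sketch of how the new option could be switched on in a local configure step; the `build` directory name is an assumption, and in CI the option is normally driven through the codebrowser image script changed later in this commit:

```bash
# hypothetical local configure; only ENABLE_WOBOQ_CODEBROWSER itself comes from this commit
cmake -S . -B build -DENABLE_WOBOQ_CODEBROWSER=ON
```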
@@ -3,6 +3,7 @@
 #include <cassert>
 #include <stdexcept> // for std::logic_error
 #include <string>
+#include <type_traits>
 #include <vector>
 #include <functional>
 #include <iosfwd>
@@ -326,5 +327,16 @@ namespace ZeroTraits
     inline void set(StringRef & x) { x.size = 0; }
 }

+namespace PackedZeroTraits
+{
+    template <typename Second, template <typename, typename> class PackedPairNoInit>
+    inline bool check(const PackedPairNoInit<StringRef, Second> p)
+    { return 0 == p.key.size; }
+
+    template <typename Second, template <typename, typename> class PackedPairNoInit>
+    inline void set(PackedPairNoInit<StringRef, Second> & p)
+    { p.key.size = 0; }
+}
+
 std::ostream & operator<<(std::ostream & os, const StringRef & str);
@@ -5,11 +5,11 @@ if (NOT TARGET check)
     if (CMAKE_CONFIGURATION_TYPES)
         add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
             --force-new-ctest-process --output-on-failure --build-config "$<CONFIGURATION>"
-            WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+            WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
     else ()
         add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
             --force-new-ctest-process --output-on-failure
-            WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+            WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
     endif ()
 endif ()

@@ -5,14 +5,14 @@ if (Git_FOUND)
     # Commit hash + whether the building workspace was dirty or not
     execute_process(COMMAND
         "${GIT_EXECUTABLE}" rev-parse HEAD
-        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
         OUTPUT_VARIABLE GIT_HASH
         ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

     # Branch name
     execute_process(COMMAND
         "${GIT_EXECUTABLE}" rev-parse --abbrev-ref HEAD
-        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
         OUTPUT_VARIABLE GIT_BRANCH
         ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

@@ -20,14 +20,14 @@ if (Git_FOUND)
     SET(ENV{TZ} "UTC")
     execute_process(COMMAND
         "${GIT_EXECUTABLE}" log -1 --format=%ad --date=iso-local
-        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
         OUTPUT_VARIABLE GIT_DATE
         ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

     # Subject of the commit
     execute_process(COMMAND
         "${GIT_EXECUTABLE}" log -1 --format=%s
-        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+        WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
         OUTPUT_VARIABLE GIT_COMMIT_SUBJECT
         ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)

@@ -35,7 +35,7 @@ if (Git_FOUND)

     execute_process(
         COMMAND ${GIT_EXECUTABLE} status
-        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
+        WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
 else()
     message(STATUS "Git could not be found.")
 endif()
@@ -7,6 +7,6 @@ message (STATUS "compiler CXX = ${CMAKE_CXX_COMPILER} ${FULL_CXX_FLAGS}")
 message (STATUS "LINKER_FLAGS = ${FULL_EXE_LINKER_FLAGS}")

 # Reproducible builds
-string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_C_FLAGS_NORMALIZED "${FULL_C_FLAGS}")
-string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_CXX_FLAGS_NORMALIZED "${FULL_CXX_FLAGS}")
-string (REPLACE "${CMAKE_SOURCE_DIR}" "." FULL_EXE_LINKER_FLAGS_NORMALIZED "${FULL_EXE_LINKER_FLAGS}")
+string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_C_FLAGS_NORMALIZED "${FULL_C_FLAGS}")
+string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_CXX_FLAGS_NORMALIZED "${FULL_CXX_FLAGS}")
+string (REPLACE "${PROJECT_SOURCE_DIR}" "." FULL_EXE_LINKER_FLAGS_NORMALIZED "${FULL_EXE_LINKER_FLAGS}")
@@ -8,6 +8,9 @@ option (SANITIZE "Enable one of the code sanitizers" "")

 set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER")

+# It's possible to pass an ignore list to sanitizers (-fsanitize-ignorelist). Intentionally not doing this because
+# 1. out-of-source suppressions are awkward 2. it seems ignore lists don't work after the Clang v16 upgrade (#49829)
+
 if (SANITIZE)
     if (SANITIZE STREQUAL "address")
         set (ASAN_FLAGS "-fsanitize=address -fsanitize-address-use-after-scope")
@@ -29,14 +32,14 @@ if (SANITIZE)

         # Linking can fail due to relocation overflows (see #49145), caused by too big object files / libraries.
         # Work around this with position-independent builds (-fPIC and -fpie), this is slightly slower than non-PIC/PIE but that's okay.
-        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt")
+        set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie")
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}")

     elseif (SANITIZE STREQUAL "thread")
         set (TSAN_FLAGS "-fsanitize=thread")
         if (COMPILER_CLANG)
-            set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt")
+            set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/tsan_suppressions.txt")
         endif()

         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
@@ -54,7 +57,7 @@ if (SANITIZE)
             set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
         endif()
         if (COMPILER_CLANG)
-            set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
+            set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${PROJECT_SOURCE_DIR}/tests/ubsan_suppressions.txt")
         endif()

         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
@@ -1,4 +1,4 @@
-include(${CMAKE_SOURCE_DIR}/cmake/autogenerated_versions.txt)
+include(${PROJECT_SOURCE_DIR}/cmake/autogenerated_versions.txt)

 set(VERSION_EXTRA "" CACHE STRING "")
 set(VERSION_TWEAK "" CACHE STRING "")
@@ -6,7 +6,7 @@ if (NOT ENABLE_AVRO)
     return()
 endif()

-set(AVROCPP_ROOT_DIR "${CMAKE_SOURCE_DIR}/contrib/avro/lang/c++")
+set(AVROCPP_ROOT_DIR "${PROJECT_SOURCE_DIR}/contrib/avro/lang/c++")
 set(AVROCPP_INCLUDE_DIR "${AVROCPP_ROOT_DIR}/api")
 set(AVROCPP_SOURCE_DIR "${AVROCPP_ROOT_DIR}/impl")

@@ -18,7 +18,7 @@ endif()
 # Need to use C++17 since the compilation is not possible with C++20 currently.
 set (CMAKE_CXX_STANDARD 17)

-set(CASS_ROOT_DIR ${CMAKE_SOURCE_DIR}/contrib/cassandra)
+set(CASS_ROOT_DIR ${PROJECT_SOURCE_DIR}/contrib/cassandra)
 set(CASS_SRC_DIR "${CASS_ROOT_DIR}/src")
 set(CASS_INCLUDE_DIR "${CASS_ROOT_DIR}/include")

@@ -26,7 +26,7 @@ endif ()
 # StorageSystemTimeZones.generated.cpp is autogenerated each time during a build
 # data in this file will be used to populate the system.time_zones table, this is specific to OS_LINUX
 # as the library that's built using embedded tzdata is also specific to OS_LINUX
-set(SYSTEM_STORAGE_TZ_FILE "${CMAKE_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
+set(SYSTEM_STORAGE_TZ_FILE "${PROJECT_BINARY_DIR}/src/Storages/System/StorageSystemTimeZones.generated.cpp")
 # remove existing copies so that its generated fresh on each build.
 file(REMOVE ${SYSTEM_STORAGE_TZ_FILE})

@@ -1,7 +1,7 @@
 # This file is a modified version of contrib/libuv/CMakeLists.txt

-set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/libuv")
-set (BINARY_DIR "${CMAKE_BINARY_DIR}/contrib/libuv")
+set (SOURCE_DIR "${PROJECT_SOURCE_DIR}/contrib/libuv")
+set (BINARY_DIR "${PROJECT_BINARY_DIR}/contrib/libuv")

 set(uv_sources
     src/fs-poll.c
@@ -15,7 +15,7 @@ endif()

 # This is the LGPL libmariadb project.

-set(CC_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/mariadb-connector-c)
+set(CC_SOURCE_DIR ${PROJECT_SOURCE_DIR}/contrib/mariadb-connector-c)
 set(CC_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})

 set(WITH_SSL ON)
@@ -1,4 +1,4 @@
-set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/snappy")
+set (SOURCE_DIR "${PROJECT_SOURCE_DIR}/contrib/snappy")

 if (ARCH_S390X)
     set (SNAPPY_IS_BIG_ENDIAN 1)
@@ -1,4 +1,4 @@
-set (SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/zlib-ng)
+set (SOURCE_DIR ${PROJECT_SOURCE_DIR}/contrib/zlib-ng)

 add_definitions(-DZLIB_COMPAT)
 add_definitions(-DWITH_GZFILEOP)
@@ -15,7 +15,7 @@ nproc=$(($(nproc) + 2)) # increase parallelism
 read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"

 mkdir -p "$BUILD_DIRECTORY" && cd "$BUILD_DIRECTORY"
-cmake "$SOURCE_DIRECTORY" -DCMAKE_CXX_COMPILER="/usr/bin/clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="/usr/bin/clang-${LLVM_VERSION}" -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 "${CMAKE_FLAGS[@]}"
+cmake "$SOURCE_DIRECTORY" -DCMAKE_CXX_COMPILER="/usr/bin/clang++-${LLVM_VERSION}" -DCMAKE_C_COMPILER="/usr/bin/clang-${LLVM_VERSION}" -DENABLE_WOBOQ_CODEBROWSER=ON "${CMAKE_FLAGS[@]}"
 mkdir -p "$HTML_RESULT_DIRECTORY"
 echo 'Filter out too noisy "Error: filename" lines and keep them in full codebrowser_generator.log'
 /woboq_codebrowser/generator/codebrowser_generator -b "$BUILD_DIRECTORY" -a \
@@ -13,8 +13,8 @@ The PostgreSQL engine allows to perform `SELECT` and `INSERT` queries on data th
 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 (
-    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
-    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
+    name1 type1 [DEFAULT|MATERIALIZED|ALIAS expr1] [TTL expr1],
+    name2 type2 [DEFAULT|MATERIALIZED|ALIAS expr2] [TTL expr2],
     ...
 ) ENGINE = PostgreSQL('host:port', 'database', 'table', 'user', 'password'[, `schema`]);
 ```
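As a hedged illustration of the corrected syntax shown above (column types without brackets), here is a sketch; the host, database, table, credentials, and schema are placeholders invented for this example, not values from the commit:

```sql
CREATE TABLE pg_orders
(
    id UInt64,
    status String,
    created_at DateTime
)
ENGINE = PostgreSQL('postgres1:5432', 'shop', 'orders', 'pg_user', 'pg_password', 'public');
```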
docs/en/getting-started/example-datasets/reddit-comments.md (new file, 636 lines)
@@ -0,0 +1,636 @@
---
slug: /en/getting-started/example-datasets/reddit-comments
sidebar_label: Reddit comments
---

# Reddit comments dataset

This dataset contains publicly available comments on Reddit from December 2005 to March 2023, and includes over 7B rows of data. The raw data is in JSON format in compressed `.zst` files and the rows look like the following:

```json
{"controversiality":0,"body":"A look at Vietnam and Mexico exposes the myth of market liberalisation.","subreddit_id":"t5_6","link_id":"t3_17863","stickied":false,"subreddit":"reddit.com","score":2,"ups":2,"author_flair_css_class":null,"created_utc":1134365188,"author_flair_text":null,"author":"frjo","id":"c13","edited":false,"parent_id":"t3_17863","gilded":0,"distinguished":null,"retrieved_on":1473738411}
{"created_utc":1134365725,"author_flair_css_class":null,"score":1,"ups":1,"subreddit":"reddit.com","stickied":false,"link_id":"t3_17866","subreddit_id":"t5_6","controversiality":0,"body":"The site states \"What can I use it for? Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more...\", just like any other new breeed of sites that want us to store everything we have on the web. And they even guarantee multiple levels of security and encryption etc. But what prevents these web site operators fom accessing and/or stealing Meeting notes, Reports, technical specs Sign-up sheets, proposals and much more, for competitive or personal gains...? I am pretty sure that most of them are honest, but what's there to prevent me from setting up a good useful site and stealing all your data? Call me paranoid - I am.","retrieved_on":1473738411,"distinguished":null,"gilded":0,"id":"c14","edited":false,"parent_id":"t3_17866","author":"zse7zse","author_flair_text":null}
{"gilded":0,"distinguished":null,"retrieved_on":1473738411,"author":"[deleted]","author_flair_text":null,"edited":false,"id":"c15","parent_id":"t3_17869","subreddit":"reddit.com","score":0,"ups":0,"created_utc":1134366848,"author_flair_css_class":null,"body":"Jython related topics by Frank Wierzbicki","controversiality":0,"subreddit_id":"t5_6","stickied":false,"link_id":"t3_17869"}
{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"[deleted]","edited":false,"parent_id":"t3_17870","id":"c16","subreddit":"reddit.com","created_utc":1134367660,"author_flair_css_class":null,"score":1,"ups":1,"body":"[deleted]","controversiality":0,"stickied":false,"link_id":"t3_17870","subreddit_id":"t5_6"}
{"gilded":0,"retrieved_on":1473738411,"distinguished":null,"author_flair_text":null,"author":"rjoseph","edited":false,"id":"c17","parent_id":"t3_17817","subreddit":"reddit.com","author_flair_css_class":null,"created_utc":1134367754,"score":1,"ups":1,"body":"Saft is by far the best extension you could tak onto your Safari","controversiality":0,"link_id":"t3_17817","stickied":false,"subreddit_id":"t5_6"}
```

A shoutout to Percona for the [motivation behind ingesting this dataset](https://www.percona.com/blog/big-data-set-reddit-comments-analyzing-clickhouse/), which we have downloaded and stored in an S3 bucket.

:::note
The following commands were executed on ClickHouse Cloud. To run this on your own cluster, replace `default` in the `s3Cluster` function call with the name of your cluster. If you do not have a cluster, then replace the `s3Cluster` function with the `s3` function.
:::

1. Let's create a table for the Reddit data:

```sql
CREATE TABLE reddit
(
    subreddit LowCardinality(String),
    subreddit_id LowCardinality(String),
    subreddit_type Enum('public' = 1, 'restricted' = 2, 'user' = 3, 'archived' = 4, 'gold_restricted' = 5, 'private' = 6),
    author LowCardinality(String),
    body String CODEC(ZSTD(6)),
    created_date Date DEFAULT toDate(created_utc),
    created_utc DateTime,
    retrieved_on DateTime,
    id String,
    parent_id String,
    link_id String,
    score Int32,
    total_awards_received UInt16,
    controversiality UInt8,
    gilded UInt8,
    collapsed_because_crowd_control UInt8,
    collapsed_reason Enum('' = 0, 'comment score below threshold' = 1, 'may be sensitive content' = 2, 'potentially toxic' = 3, 'potentially toxic content' = 4),
    distinguished Enum('' = 0, 'moderator' = 1, 'admin' = 2, 'special' = 3),
    removal_reason Enum('' = 0, 'legal' = 1),
    author_created_utc DateTime,
    author_fullname LowCardinality(String),
    author_patreon_flair UInt8,
    author_premium UInt8,
    can_gild UInt8,
    can_mod_post UInt8,
    collapsed UInt8,
    is_submitter UInt8,
    _edited String,
    locked UInt8,
    quarantined UInt8,
    no_follow UInt8,
    send_replies UInt8,
    stickied UInt8,
    author_flair_text LowCardinality(String)
)
ENGINE = MergeTree
ORDER BY (subreddit, created_date, author);
```

:::note
The names of the files in S3 start with `RC_YYYY-MM` where `YYYY-MM` goes from `2005-12` to `2023-02`. The compression changes a couple of times though, so the file extensions are not consistent. For example:

- the file names are initially `RC_2005-12.bz2` to `RC_2017-11.bz2`
- then they look like `RC_2017-12.xz` to `RC_2018-09.xz`
- and finally `RC_2018-10.zst` to `RC_2023-02.zst`
:::

2. We are going to start with one month of data, but if you want to simply insert every row - skip ahead to step 8 below. The following file has 86M records from December, 2017:

```sql
INSERT INTO reddit
    SELECT *
    FROM s3Cluster(
        'default',
        'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2017-12.xz',
        'JSONEachRow'
    );
```

If you do not have a cluster, use `s3` instead of `s3Cluster`:

```sql
INSERT INTO reddit
    SELECT *
    FROM s3(
        'https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/reddit/original/RC_2017-12.xz',
        'JSONEachRow'
    );
```

3. It will take a while depending on your resources, but when it's done verify it worked:

```sql
SELECT formatReadableQuantity(count())
FROM reddit;
```

```response
┌─formatReadableQuantity(count())─┐
│ 85.97 million │
└─────────────────────────────────┘
```

4. Let's see how many unique subreddits were in December of 2017:

```sql
SELECT uniqExact(subreddit)
FROM reddit;
```

```response
┌─uniqExact(subreddit)─┐
│ 91613 │
└──────────────────────┘

1 row in set. Elapsed: 1.572 sec. Processed 85.97 million rows, 367.43 MB (54.71 million rows/s., 233.80 MB/s.)
```

5. This query returns the top 20 subreddits (in terms of number of comments):

```sql
SELECT
    subreddit,
    count() AS c
FROM reddit
GROUP BY subreddit
ORDER BY c DESC
LIMIT 20;
```

```response
┌─subreddit───────┬───────c─┐
│ AskReddit │ 5245881 │
│ politics │ 1753120 │
│ nfl │ 1220266 │
│ nba │ 960388 │
│ The_Donald │ 931857 │
│ news │ 796617 │
│ worldnews │ 765709 │
│ CFB │ 710360 │
│ gaming │ 602761 │
│ movies │ 601966 │
│ soccer │ 590628 │
│ Bitcoin │ 583783 │
│ pics │ 563408 │
│ StarWars │ 562514 │
│ funny │ 547563 │
│ leagueoflegends │ 517213 │
│ teenagers │ 492020 │
│ DestinyTheGame │ 477377 │
│ todayilearned │ 472650 │
│ videos │ 450581 │
└─────────────────┴─────────┘

20 rows in set. Elapsed: 0.368 sec. Processed 85.97 million rows, 367.43 MB (233.34 million rows/s., 997.25 MB/s.)
```

6. Here are the top 10 authors in December of 2017, in terms of number of comments posted:

```sql
SELECT
    author,
    count() AS c
FROM reddit
GROUP BY author
ORDER BY c DESC
LIMIT 10;
```

```response
┌─author──────────┬───────c─┐
│ [deleted] │ 5913324 │
│ AutoModerator │ 784886 │
│ ImagesOfNetwork │ 83241 │
│ BitcoinAllBot │ 54484 │
│ imguralbumbot │ 45822 │
│ RPBot │ 29337 │
│ WikiTextBot │ 25982 │
│ Concise_AMA_Bot │ 19974 │
│ MTGCardFetcher │ 19103 │
│ TotesMessenger │ 19057 │
└─────────────────┴─────────┘

10 rows in set. Elapsed: 8.143 sec. Processed 85.97 million rows, 711.05 MB (10.56 million rows/s., 87.32 MB/s.)
```

7. We already inserted some data, but we will start over:

```sql
TRUNCATE TABLE reddit;
```

8. This is a fun dataset and it looks like we can find some great information, so let's go ahead and insert the entire dataset from 2005 to 2023. When you're ready, run this command to insert all the rows. (It takes a while - up to 17 hours!)

```sql
INSERT INTO reddit
SELECT *
FROM s3Cluster(
    'default',
    'https://clickhouse-public-datasets.s3.amazonaws.com/reddit/original/RC*',
    'JSONEachRow'
)
SETTINGS zstd_window_log_max = 31;
```

The response looks like:

```response
0 rows in set. Elapsed: 61187.839 sec. Processed 6.74 billion rows, 2.06 TB (110.17 thousand rows/s., 33.68 MB/s.)
```

9. Let's see how many rows were inserted and how much disk space the table is using:

```sql
SELECT
    sum(rows) AS count,
    formatReadableQuantity(count),
    formatReadableSize(sum(bytes)) AS disk_size,
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size
FROM system.parts
WHERE (table = 'reddit') AND active
```

Notice the compression of disk storage is about 1/3 of the uncompressed size:

```response
┌──────count─┬─formatReadableQuantity(sum(rows))─┬─disk_size──┬─uncompressed_size─┐
│ 6739503568 │ 6.74 billion │ 501.10 GiB │ 1.51 TiB │
└────────────┴───────────────────────────────────┴────────────┴───────────────────┘

1 row in set. Elapsed: 0.010 sec.
```
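To dig into where that roughly 3x compression comes from, here is a hedged sketch that breaks the sizes down per column; it assumes the table lives in the current database and that your release of `system.columns` exposes the per-column size counters:

```sql
-- per-column compressed vs. uncompressed size for the reddit table (sketch)
SELECT
    name,
    formatReadableSize(data_compressed_bytes) AS compressed,
    formatReadableSize(data_uncompressed_bytes) AS uncompressed,
    round(data_uncompressed_bytes / data_compressed_bytes, 2) AS ratio
FROM system.columns
WHERE table = 'reddit'
ORDER BY data_compressed_bytes DESC;
```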

10. The following query shows how many comments, authors and subreddits we have for each month:

```sql
SELECT
    toStartOfMonth(created_utc) AS firstOfMonth,
    count() AS c,
    bar(c, 0, 50000000, 25) AS bar_count,
    uniq(author) AS authors,
    bar(authors, 0, 5000000, 25) AS bar_authors,
    uniq(subreddit) AS subreddits,
    bar(subreddits, 0, 100000, 25) AS bar_subreddits
FROM reddit
GROUP BY firstOfMonth
ORDER BY firstOfMonth ASC;
```

This is a substantial query that has to process all 6.74 billion rows, but we still get an impressive response time (about 3 minutes):
|
|
||||||
|
```response
|
||||||
|
┌─firstOfMonth─┬─────────c─┬─bar_count─────────────────┬─authors─┬─bar_authors───────────────┬─subreddits─┬─bar_subreddits────────────┐
|
||||||
|
│ 2005-12-01 │ 1075 │ │ 394 │ │ 1 │ │
|
||||||
|
│ 2006-01-01 │ 3666 │ │ 791 │ │ 2 │ │
|
||||||
|
│ 2006-02-01 │ 9095 │ │ 1464 │ │ 18 │ │
|
||||||
|
│ 2006-03-01 │ 13859 │ │ 1958 │ │ 15 │ │
|
||||||
|
│ 2006-04-01 │ 19090 │ │ 2334 │ │ 21 │ │
|
||||||
|
│ 2006-05-01 │ 26859 │ │ 2698 │ │ 21 │ │
|
||||||
|
│ 2006-06-01 │ 29163 │ │ 3043 │ │ 19 │ │
|
||||||
|
│ 2006-07-01 │ 37031 │ │ 3532 │ │ 22 │ │
|
||||||
|
│ 2006-08-01 │ 50559 │ │ 4750 │ │ 24 │ │
|
||||||
|
│ 2006-09-01 │ 50675 │ │ 4908 │ │ 21 │ │
|
||||||
|
│ 2006-10-01 │ 54148 │ │ 5654 │ │ 31 │ │
|
||||||
|
│ 2006-11-01 │ 62021 │ │ 6490 │ │ 23 │ │
|
||||||
|
│ 2006-12-01 │ 61018 │ │ 6707 │ │ 24 │ │
|
||||||
|
│ 2007-01-01 │ 81341 │ │ 7931 │ │ 23 │ │
|
||||||
|
│ 2007-02-01 │ 95634 │ │ 9020 │ │ 21 │ │
|
||||||
|
│ 2007-03-01 │ 112444 │ │ 10842 │ │ 23 │ │
|
||||||
|
│ 2007-04-01 │ 126773 │ │ 10701 │ │ 26 │ │
|
||||||
|
│ 2007-05-01 │ 170097 │ │ 11365 │ │ 25 │ │
|
||||||
|
│ 2007-06-01 │ 178800 │ │ 11267 │ │ 22 │ │
|
||||||
|
│ 2007-07-01 │ 203319 │ │ 12482 │ │ 25 │ │
|
||||||
|
│ 2007-08-01 │ 225111 │ │ 14124 │ │ 30 │ │
|
||||||
|
│ 2007-09-01 │ 259497 │ ▏ │ 15416 │ │ 33 │ │
|
||||||
|
│ 2007-10-01 │ 274170 │ ▏ │ 15302 │ │ 36 │ │
|
||||||
|
│ 2007-11-01 │ 372983 │ ▏ │ 15134 │ │ 43 │ │
|
||||||
|
│ 2007-12-01 │ 363390 │ ▏ │ 15915 │ │ 31 │ │
|
||||||
|
│ 2008-01-01 │ 452990 │ ▏ │ 18857 │ │ 126 │ │
|
||||||
|
│ 2008-02-01 │ 441768 │ ▏ │ 18266 │ │ 173 │ │
|
||||||
|
│ 2008-03-01 │ 463728 │ ▏ │ 18947 │ │ 292 │ │
|
||||||
|
│ 2008-04-01 │ 468317 │ ▏ │ 18590 │ │ 323 │ │
|
||||||
|
│ 2008-05-01 │ 536380 │ ▎ │ 20861 │ │ 375 │ │
|
||||||
|
│ 2008-06-01 │ 577684 │ ▎ │ 22557 │ │ 575 │ ▏ │
|
||||||
|
│ 2008-07-01 │ 592610 │ ▎ │ 23123 │ │ 657 │ ▏ │
|
||||||
|
│ 2008-08-01 │ 595959 │ ▎ │ 23729 │ │ 707 │ ▏ │
|
||||||
|
│ 2008-09-01 │ 680892 │ ▎ │ 26374 │ ▏ │ 801 │ ▏ │
|
||||||
|
│ 2008-10-01 │ 789874 │ ▍ │ 28970 │ ▏ │ 893 │ ▏ │
|
||||||
|
│ 2008-11-01 │ 792310 │ ▍ │ 30272 │ ▏ │ 1024 │ ▎ │
|
||||||
|
│ 2008-12-01 │ 850359 │ ▍ │ 34073 │ ▏ │ 1103 │ ▎ │
|
||||||
|
│ 2009-01-01 │ 1051649 │ ▌ │ 38978 │ ▏ │ 1316 │ ▎ │
|
||||||
|
│ 2009-02-01 │ 944711 │ ▍ │ 43390 │ ▏ │ 1132 │ ▎ │
|
||||||
|
│ 2009-03-01 │ 1048643 │ ▌ │ 46516 │ ▏ │ 1203 │ ▎ │
|
||||||
|
│ 2009-04-01 │ 1094599 │ ▌ │ 48284 │ ▏ │ 1334 │ ▎ │
|
||||||
|
│ 2009-05-01 │ 1201257 │ ▌ │ 52512 │ ▎ │ 1395 │ ▎ │
|
||||||
|
│ 2009-06-01 │ 1258750 │ ▋ │ 57728 │ ▎ │ 1473 │ ▎ │
|
||||||
|
│ 2009-07-01 │ 1470290 │ ▋ │ 60098 │ ▎ │ 1686 │ ▍ │
|
||||||
|
│ 2009-08-01 │ 1750688 │ ▉ │ 67347 │ ▎ │ 1777 │ ▍ │
|
||||||
|
│ 2009-09-01 │ 2032276 │ █ │ 78051 │ ▍ │ 1784 │ ▍ │
|
||||||
|
│ 2009-10-01 │ 2242017 │ █ │ 93409 │ ▍ │ 2071 │ ▌ │
|
||||||
|
│ 2009-11-01 │ 2207444 │ █ │ 95940 │ ▍ │ 2141 │ ▌ │
|
||||||
|
│ 2009-12-01 │ 2560510 │ █▎ │ 104239 │ ▌ │ 2141 │ ▌ │
|
||||||
|
│ 2010-01-01 │ 2884096 │ █▍ │ 114314 │ ▌ │ 2313 │ ▌ │
|
||||||
|
│ 2010-02-01 │ 2687779 │ █▎ │ 115683 │ ▌ │ 2522 │ ▋ │
|
||||||
|
│ 2010-03-01 │ 3228254 │ █▌ │ 125775 │ ▋ │ 2890 │ ▋ │
|
||||||
|
│ 2010-04-01 │ 3209898 │ █▌ │ 128936 │ ▋ │ 3170 │ ▊ │
|
||||||
|
│ 2010-05-01 │ 3267363 │ █▋ │ 131851 │ ▋ │ 3166 │ ▊ │
|
||||||
|
│ 2010-06-01 │ 3532867 │ █▊ │ 139522 │ ▋ │ 3301 │ ▊ │
|
||||||
|
│ 2010-07-01 │ 4032737 │ ██ │ 153451 │ ▊ │ 3662 │ ▉ │
|
||||||
|
│ 2010-08-01 │ 4247982 │ ██ │ 164071 │ ▊ │ 3653 │ ▉ │
|
||||||
|
│ 2010-09-01 │ 4704069 │ ██▎ │ 186613 │ ▉ │ 4009 │ █ │
|
||||||
|
│ 2010-10-01 │ 5032368 │ ██▌ │ 203800 │ █ │ 4154 │ █ │
|
||||||
|
│ 2010-11-01 │ 5689002 │ ██▊ │ 226134 │ █▏ │ 4383 │ █ │
|
||||||
|
│ 2010-12-01 │ 5972642 │ ██▉ │ 245824 │ █▏ │ 4692 │ █▏ │
|
||||||
|
│ 2011-01-01 │ 6603329 │ ███▎ │ 270025 │ █▎ │ 5141 │ █▎ │
|
||||||
|
│ 2011-02-01 │ 6363114 │ ███▏ │ 277593 │ █▍ │ 5202 │ █▎ │
|
||||||
|
│ 2011-03-01 │ 7556165 │ ███▊ │ 314748 │ █▌ │ 5445 │ █▎ │
|
||||||
|
│ 2011-04-01 │ 7571398 │ ███▊ │ 329920 │ █▋ │ 6128 │ █▌ │
|
||||||
|
│ 2011-05-01 │ 8803949 │ ████▍ │ 365013 │ █▊ │ 6834 │ █▋ │
|
||||||
|
│ 2011-06-01 │ 9766511 │ ████▉ │ 393945 │ █▉ │ 7519 │ █▉ │
|
||||||
|
│ 2011-07-01 │ 10557466 │ █████▎ │ 424235 │ ██ │ 8293 │ ██ │
|
||||||
|
│ 2011-08-01 │ 12316144 │ ██████▏ │ 475326 │ ██▍ │ 9657 │ ██▍ │
|
||||||
|
│ 2011-09-01 │ 12150412 │ ██████ │ 503142 │ ██▌ │ 10278 │ ██▌ │
|
||||||
|
│ 2011-10-01 │ 13470278 │ ██████▋ │ 548801 │ ██▋ │ 10922 │ ██▋ │
|
||||||
|
│ 2011-11-01 │ 13621533 │ ██████▊ │ 574435 │ ██▊ │ 11572 │ ██▉ │
|
||||||
|
│ 2011-12-01 │ 14509469 │ ███████▎ │ 622849 │ ███ │ 12335 │ ███ │
|
||||||
|
│ 2012-01-01 │ 16350205 │ ████████▏ │ 696110 │ ███▍ │ 14281 │ ███▌ │
|
||||||
|
│ 2012-02-01 │ 16015695 │ ████████ │ 722892 │ ███▌ │ 14949 │ ███▋ │
|
||||||
|
│ 2012-03-01 │ 17881943 │ ████████▉ │ 789664 │ ███▉ │ 15795 │ ███▉ │
|
||||||
|
│ 2012-04-01 │ 19044534 │ █████████▌ │ 842491 │ ████▏ │ 16440 │ ████ │
|
||||||
|
│ 2012-05-01 │ 20388260 │ ██████████▏ │ 886176 │ ████▍ │ 16974 │ ████▏ │
|
||||||
|
│ 2012-06-01 │ 21897913 │ ██████████▉ │ 946798 │ ████▋ │ 17952 │ ████▍ │
|
||||||
|
│ 2012-07-01 │ 24087517 │ ████████████ │ 1018636 │ █████ │ 19069 │ ████▊ │
|
||||||
|
│ 2012-08-01 │ 25703326 │ ████████████▊ │ 1094445 │ █████▍ │ 20553 │ █████▏ │
|
||||||
|
│ 2012-09-01 │ 23419524 │ ███████████▋ │ 1088491 │ █████▍ │ 20831 │ █████▏ │
|
||||||
|
│ 2012-10-01 │ 24788236 │ ████████████▍ │ 1131885 │ █████▋ │ 21868 │ █████▍ │
|
||||||
|
│ 2012-11-01 │ 24648302 │ ████████████▎ │ 1167608 │ █████▊ │ 21791 │ █████▍ │
|
||||||
|
│ 2012-12-01 │ 26080276 │ █████████████ │ 1218402 │ ██████ │ 22622 │ █████▋ │
|
||||||
|
│ 2013-01-01 │ 30365867 │ ███████████████▏ │ 1341703 │ ██████▋ │ 24696 │ ██████▏ │
|
||||||
|
│ 2013-02-01 │ 27213960 │ █████████████▌ │ 1304756 │ ██████▌ │ 24514 │ ██████▏ │
|
||||||
|
│ 2013-03-01 │ 30771274 │ ███████████████▍ │ 1391703 │ ██████▉ │ 25730 │ ██████▍ │
|
||||||
|
│ 2013-04-01 │ 33259557 │ ████████████████▋ │ 1485971 │ ███████▍ │ 27294 │ ██████▊ │
|
||||||
|
│ 2013-05-01 │ 33126225 │ ████████████████▌ │ 1506473 │ ███████▌ │ 27299 │ ██████▊ │
|
||||||
|
│ 2013-06-01 │ 32648247 │ ████████████████▎ │ 1506650 │ ███████▌ │ 27450 │ ██████▊ │
|
||||||
|
│ 2013-07-01 │ 34922133 │ █████████████████▍ │ 1561771 │ ███████▊ │ 28294 │ ███████ │
|
||||||
|
│ 2013-08-01 │ 34766579 │ █████████████████▍ │ 1589781 │ ███████▉ │ 28943 │ ███████▏ │
|
||||||
|
│ 2013-09-01 │ 31990369 │ ███████████████▉ │ 1570342 │ ███████▊ │ 29408 │ ███████▎ │
|
||||||
|
│ 2013-10-01 │ 35940040 │ █████████████████▉ │ 1683770 │ ████████▍ │ 30273 │ ███████▌ │
|
||||||
|
│ 2013-11-01 │ 37396497 │ ██████████████████▋ │ 1757467 │ ████████▊ │ 31173 │ ███████▊ │
|
||||||
|
│ 2013-12-01 │ 39810216 │ ███████████████████▉ │ 1846204 │ █████████▏ │ 32326 │ ████████ │
|
||||||
|
│ 2014-01-01 │ 42420655 │ █████████████████████▏ │ 1927229 │ █████████▋ │ 35603 │ ████████▉ │
|
||||||
|
│ 2014-02-01 │ 38703362 │ ███████████████████▎ │ 1874067 │ █████████▎ │ 37007 │ █████████▎ │
|
||||||
|
│ 2014-03-01 │ 42459956 │ █████████████████████▏ │ 1959888 │ █████████▊ │ 37948 │ █████████▍ │
|
||||||
|
│ 2014-04-01 │ 42440735 │ █████████████████████▏ │ 1951369 │ █████████▊ │ 38362 │ █████████▌ │
|
||||||
|
│ 2014-05-01 │ 42514094 │ █████████████████████▎ │ 1970197 │ █████████▊ │ 39078 │ █████████▊ │
|
||||||
|
│ 2014-06-01 │ 41990650 │ ████████████████████▉ │ 1943850 │ █████████▋ │ 38268 │ █████████▌ │
|
||||||
|
│ 2014-07-01 │ 46868899 │ ███████████████████████▍ │ 2059346 │ ██████████▎ │ 40634 │ ██████████▏ │
|
||||||
|
│ 2014-08-01 │ 46990813 │ ███████████████████████▍ │ 2117335 │ ██████████▌ │ 41764 │ ██████████▍ │
|
||||||
|
│ 2014-09-01 │ 44992201 │ ██████████████████████▍ │ 2124708 │ ██████████▌ │ 41890 │ ██████████▍ │
|
||||||
|
│ 2014-10-01 │ 47497520 │ ███████████████████████▋ │ 2206535 │ ███████████ │ 43109 │ ██████████▊ │
|
||||||
|
│ 2014-11-01 │ 46118074 │ ███████████████████████ │ 2239747 │ ███████████▏ │ 43718 │ ██████████▉ │
|
||||||
|
│ 2014-12-01 │ 48807699 │ ████████████████████████▍ │ 2372945 │ ███████████▊ │ 43823 │ ██████████▉ │
|
||||||
|
│ 2015-01-01 │ 53851542 │ █████████████████████████ │ 2499536 │ ████████████▍ │ 47172 │ ███████████▊ │
|
||||||
|
│ 2015-02-01 │ 48342747 │ ████████████████████████▏ │ 2448496 │ ████████████▏ │ 47229 │ ███████████▊ │
|
||||||
|
│ 2015-03-01 │ 54564441 │ █████████████████████████ │ 2550534 │ ████████████▊ │ 48156 │ ████████████ │
|
||||||
|
│ 2015-04-01 │ 55005780 │ █████████████████████████ │ 2609443 │ █████████████ │ 49865 │ ████████████▍ │
|
||||||
|
│ 2015-05-01 │ 54504410 │ █████████████████████████ │ 2585535 │ ████████████▉ │ 50137 │ ████████████▌ │
|
||||||
|
│ 2015-06-01 │ 54258492 │ █████████████████████████ │ 2595129 │ ████████████▉ │ 49598 │ ████████████▍ │
|
||||||
|
│ 2015-07-01 │ 58451788 │ █████████████████████████ │ 2720026 │ █████████████▌ │ 55022 │ █████████████▊ │
|
||||||
|
│ 2015-08-01 │ 58075327 │ █████████████████████████ │ 2743994 │ █████████████▋ │ 55302 │ █████████████▊ │
|
||||||
|
│ 2015-09-01 │ 55574825 │ █████████████████████████ │ 2672793 │ █████████████▎ │ 53960 │ █████████████▍ │
|
||||||
|
│ 2015-10-01 │ 59494045 │ █████████████████████████ │ 2816426 │ ██████████████ │ 70210 │ █████████████████▌ │
|
||||||
|
│ 2015-11-01 │ 57117500 │ █████████████████████████ │ 2847146 │ ██████████████▏ │ 71363 │ █████████████████▊ │
|
||||||
|
│ 2015-12-01 │ 58523312 │ █████████████████████████ │ 2854840 │ ██████████████▎ │ 94559 │ ███████████████████████▋ │
|
||||||
|
│ 2016-01-01 │ 61991732 │ █████████████████████████ │ 2920366 │ ██████████████▌ │ 108438 │ █████████████████████████ │
|
||||||
|
│ 2016-02-01 │ 59189875 │ █████████████████████████ │ 2854683 │ ██████████████▎ │ 109916 │ █████████████████████████ │
|
||||||
|
│ 2016-03-01 │ 63918864 │ █████████████████████████ │ 2969542 │ ██████████████▊ │ 84787 │ █████████████████████▏ │
|
||||||
|
│ 2016-04-01 │ 64271256 │ █████████████████████████ │ 2999086 │ ██████████████▉ │ 61647 │ ███████████████▍ │
|
||||||
|
│ 2016-05-01 │ 65212004 │ █████████████████████████ │ 3034674 │ ███████████████▏ │ 67465 │ ████████████████▊ │
|
||||||
|
│ 2016-06-01 │ 65867743 │ █████████████████████████ │ 3057604 │ ███████████████▎ │ 75170 │ ██████████████████▊ │
|
||||||
|
│ 2016-07-01 │ 66974735 │ █████████████████████████ │ 3199374 │ ███████████████▉ │ 77732 │ ███████████████████▍ │
|
||||||
|
│ 2016-08-01 │ 69654819 │ █████████████████████████ │ 3239957 │ ████████████████▏ │ 63080 │ ███████████████▊ │
|
||||||
|
│ 2016-09-01 │ 67024973 │ █████████████████████████ │ 3190864 │ ███████████████▉ │ 62324 │ ███████████████▌ │
|
||||||
|
│ 2016-10-01 │ 71826553 │ █████████████████████████ │ 3284340 │ ████████████████▍ │ 62549 │ ███████████████▋ │
|
||||||
|
│ 2016-11-01 │ 71022319 │ █████████████████████████ │ 3300822 │ ████████████████▌ │ 69718 │ █████████████████▍ │
|
||||||
|
│ 2016-12-01 │ 72942967 │ █████████████████████████ │ 3430324 │ █████████████████▏ │ 71705 │ █████████████████▉ │
|
||||||
|
│ 2017-01-01 │ 78946585 │ █████████████████████████ │ 3572093 │ █████████████████▊ │ 78198 │ ███████████████████▌ │
|
||||||
|
│ 2017-02-01 │ 70609487 │ █████████████████████████ │ 3421115 │ █████████████████ │ 69823 │ █████████████████▍ │
|
||||||
|
│ 2017-03-01 │ 79723106 │ █████████████████████████ │ 3638122 │ ██████████████████▏ │ 73865 │ ██████████████████▍ │
|
||||||
|
│ 2017-04-01 │ 77478009 │ █████████████████████████ │ 3620591 │ ██████████████████ │ 74387 │ ██████████████████▌ │
|
||||||
|
│ 2017-05-01 │ 79810360 │ █████████████████████████ │ 3650820 │ ██████████████████▎ │ 74356 │ ██████████████████▌ │
|
||||||
|
│ 2017-06-01 │ 79901711 │ █████████████████████████ │ 3737614 │ ██████████████████▋ │ 72114 │ ██████████████████ │
|
||||||
|
│ 2017-07-01 │ 81798725 │ █████████████████████████ │ 3872330 │ ███████████████████▎ │ 76052 │ ███████████████████ │
|
||||||
|
│ 2017-08-01 │ 84658503 │ █████████████████████████ │ 3960093 │ ███████████████████▊ │ 77798 │ ███████████████████▍ │
|
||||||
|
│ 2017-09-01 │ 83165192 │ █████████████████████████ │ 3880501 │ ███████████████████▍ │ 78402 │ ███████████████████▌ │
|
||||||
|
│ 2017-10-01 │ 85828912 │ █████████████████████████ │ 3980335 │ ███████████████████▉ │ 80685 │ ████████████████████▏ │
|
||||||
|
│ 2017-11-01 │ 84965681 │ █████████████████████████ │ 4026749 │ ████████████████████▏ │ 82659 │ ████████████████████▋ │
|
||||||
|
│ 2017-12-01 │ 85973810 │ █████████████████████████ │ 4196354 │ ████████████████████▉ │ 91984 │ ██████████████████████▉ │
|
||||||
|
│ 2018-01-01 │ 91558594 │ █████████████████████████ │ 4364443 │ █████████████████████▊ │ 102577 │ █████████████████████████ │
|
||||||
|
│ 2018-02-01 │ 86467179 │ █████████████████████████ │ 4277899 │ █████████████████████▍ │ 104610 │ █████████████████████████ │
|
||||||
|
│ 2018-03-01 │ 96490262 │ █████████████████████████ │ 4422470 │ ██████████████████████ │ 112559 │ █████████████████████████ │
|
||||||
|
│ 2018-04-01 │ 98101232 │ █████████████████████████ │ 4572434 │ ██████████████████████▊ │ 105284 │ █████████████████████████ │
|
||||||
|
│ 2018-05-01 │ 100109100 │ █████████████████████████ │ 4698908 │ ███████████████████████▍ │ 103910 │ █████████████████████████ │
|
||||||
|
│ 2018-06-01 │ 100009462 │ █████████████████████████ │ 4697426 │ ███████████████████████▍ │ 101107 │ █████████████████████████ │
|
||||||
|
│ 2018-07-01 │ 108151359 │ █████████████████████████ │ 5099492 │ █████████████████████████ │ 106184 │ █████████████████████████ │
|
||||||
|
│ 2018-08-01 │ 107330940 │ █████████████████████████ │ 5084082 │ █████████████████████████ │ 109985 │ █████████████████████████ │
|
||||||
|
│ 2018-09-01 │ 104473929 │ █████████████████████████ │ 5011953 │ █████████████████████████ │ 109710 │ █████████████████████████ │
|
||||||
|
│ 2018-10-01 │ 112346556 │ █████████████████████████ │ 5320405 │ █████████████████████████ │ 112533 │ █████████████████████████ │
|
||||||
|
│ 2018-11-01 │ 112573001 │ █████████████████████████ │ 5353282 │ █████████████████████████ │ 112211 │ █████████████████████████ │
|
||||||
|
│ 2018-12-01 │ 121953600 │ █████████████████████████ │ 5611543 │ █████████████████████████ │ 118291 │ █████████████████████████ │
|
||||||
|
│ 2019-01-01 │ 129386587 │ █████████████████████████ │ 6016687 │ █████████████████████████ │ 125725 │ █████████████████████████ │
|
||||||
|
│ 2019-02-01 │ 120645639 │ █████████████████████████ │ 5974488 │ █████████████████████████ │ 125420 │ █████████████████████████ │
|
||||||
|
│ 2019-03-01 │ 137650471 │ █████████████████████████ │ 6410197 │ █████████████████████████ │ 135924 │ █████████████████████████ │
|
||||||
|
│ 2019-04-01 │ 138473643 │ █████████████████████████ │ 6416384 │ █████████████████████████ │ 139844 │ █████████████████████████ │
|
||||||
|
│ 2019-05-01 │ 142463421 │ █████████████████████████ │ 6574836 │ █████████████████████████ │ 142012 │ █████████████████████████ │
|
||||||
|
│ 2019-06-01 │ 134172939 │ █████████████████████████ │ 6601267 │ █████████████████████████ │ 140997 │ █████████████████████████ │
|
||||||
|
│ 2019-07-01 │ 145965083 │ █████████████████████████ │ 6901822 │ █████████████████████████ │ 147802 │ █████████████████████████ │
|
||||||
|
│ 2019-08-01 │ 146854393 │ █████████████████████████ │ 6993882 │ █████████████████████████ │ 151888 │ █████████████████████████ │
|
||||||
|
│ 2019-09-01 │ 137540219 │ █████████████████████████ │ 7001362 │ █████████████████████████ │ 148839 │ █████████████████████████ │
|
||||||
|
│ 2019-10-01 │ 129771456 │ █████████████████████████ │ 6825690 │ █████████████████████████ │ 144453 │ █████████████████████████ │
|
||||||
|
│ 2019-11-01 │ 107990259 │ █████████████████████████ │ 6368286 │ █████████████████████████ │ 141768 │ █████████████████████████ │
|
||||||
|
│ 2019-12-01 │ 112895934 │ █████████████████████████ │ 6640902 │ █████████████████████████ │ 148277 │ █████████████████████████ │
|
||||||
|
│ 2020-01-01 │ 54354879 │ █████████████████████████ │ 4782339 │ ███████████████████████▉ │ 111658 │ █████████████████████████ │
|
||||||
|
│ 2020-02-01 │ 22696923 │ ███████████▎ │ 3135175 │ ███████████████▋ │ 79521 │ ███████████████████▉ │
|
||||||
|
│ 2020-03-01 │ 3466677 │ █▋ │ 987960 │ ████▉ │ 40901 │ ██████████▏ │
|
||||||
|
└──────────────┴───────────┴───────────────────────────┴─────────┴───────────────────────────┴────────────┴───────────────────────────┘
|
||||||
|
|
||||||
|
172 rows in set. Elapsed: 184.809 sec. Processed 6.74 billion rows, 89.56 GB (36.47 million rows/s., 484.62 MB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
11. Here are the top 10 subreddits of 2022:

```sql
SELECT
    subreddit,
    count() AS count
FROM reddit
WHERE toYear(created_utc) = 2022
GROUP BY subreddit
ORDER BY count DESC
LIMIT 10;
```

The response is:

```response
┌─subreddit────────┬───count─┐
│ AskReddit │ 3858203 │
│ politics │ 1356782 │
│ memes │ 1249120 │
│ nfl │ 883667 │
│ worldnews │ 866065 │
│ teenagers │ 777095 │
│ AmItheAsshole │ 752720 │
│ dankmemes │ 657932 │
│ nba │ 514184 │
│ unpopularopinion │ 473649 │
└──────────────────┴─────────┘

10 rows in set. Elapsed: 27.824 sec. Processed 6.74 billion rows, 53.26 GB (242.22 million rows/s., 1.91 GB/s.)
```

12. Let's see which subreddits had the biggest increase in comments from 2018 to 2019:

```sql
SELECT
    subreddit,
    newcount - oldcount AS diff
FROM
(
    SELECT
        subreddit,
        count(*) AS newcount
    FROM reddit
    WHERE toYear(created_utc) = 2019
    GROUP BY subreddit
)
ALL INNER JOIN
(
    SELECT
        subreddit,
        count(*) AS oldcount
    FROM reddit
    WHERE toYear(created_utc) = 2018
    GROUP BY subreddit
) USING (subreddit)
ORDER BY diff DESC
LIMIT 50
SETTINGS joined_subquery_requires_alias = 0;
```

It looks like memes and teenagers were busy on Reddit in 2019:

```response
┌─subreddit────────────┬─────diff─┐
│ memes │ 15368369 │
│ AskReddit │ 14663662 │
│ teenagers │ 12266991 │
│ AmItheAsshole │ 11561538 │
│ dankmemes │ 11305158 │
│ unpopularopinion │ 6332772 │
│ PewdiepieSubmissions │ 5930818 │
│ Market76 │ 5014668 │
│ relationship_advice │ 3776383 │
│ freefolk │ 3169236 │
│ Minecraft │ 3160241 │
│ classicwow │ 2907056 │
│ Animemes │ 2673398 │
│ gameofthrones │ 2402835 │
│ PublicFreakout │ 2267605 │
│ ShitPostCrusaders │ 2207266 │
│ RoastMe │ 2195715 │
│ gonewild │ 2148649 │
│ AnthemTheGame │ 1803818 │
│ entitledparents │ 1706270 │
│ MortalKombat │ 1679508 │
│ Cringetopia │ 1620555 │
│ pokemon │ 1615266 │
│ HistoryMemes │ 1608289 │
│ Brawlstars │ 1574977 │
│ iamatotalpieceofshit │ 1558315 │
│ trashy │ 1518549 │
│ ChapoTrapHouse │ 1505748 │
│ Pikabu │ 1501001 │
│ Showerthoughts │ 1475101 │
│ cursedcomments │ 1465607 │
│ ukpolitics │ 1386043 │
│ wallstreetbets │ 1384431 │
│ interestingasfuck │ 1378900 │
│ wholesomememes │ 1353333 │
│ AskOuija │ 1233263 │
│ borderlands3 │ 1197192 │
│ aww │ 1168257 │
│ insanepeoplefacebook │ 1155473 │
│ FortniteCompetitive │ 1122778 │
│ EpicSeven │ 1117380 │
│ FreeKarma4U │ 1116423 │
│ YangForPresidentHQ │ 1086700 │
│ SquaredCircle │ 1044089 │
│ MurderedByWords │ 1042511 │
│ AskMen │ 1024434 │
│ thedivision │ 1016634 │
│ barstoolsports │ 985032 │
│ nfl │ 978340 │
│ BattlefieldV │ 971408 │
└──────────────────────┴──────────┘

50 rows in set. Elapsed: 65.954 sec. Processed 13.48 billion rows, 79.67 GB (204.37 million rows/s., 1.21 GB/s.)
```

13. One more query: let's compare ClickHouse mentions to other technologies like Snowflake and Postgres. This query is a big one because it has to search all the comments three times for a substring, and unfortunately ClickHouse users are not yet very active on Reddit:

```sql
SELECT
    toStartOfQuarter(created_utc) AS quarter,
    sum(if(positionCaseInsensitive(body, 'clickhouse') > 0, 1, 0)) AS clickhouse,
    sum(if(positionCaseInsensitive(body, 'snowflake') > 0, 1, 0)) AS snowflake,
    sum(if(positionCaseInsensitive(body, 'postgres') > 0, 1, 0)) AS postgres
FROM reddit
GROUP BY quarter
ORDER BY quarter ASC;
```

```response
┌────Quarter─┬─clickhouse─┬─snowflake─┬─postgres─┐
│ 2005-10-01 │ 0 │ 0 │ 0 │
│ 2006-01-01 │ 0 │ 2 │ 23 │
│ 2006-04-01 │ 0 │ 2 │ 24 │
│ 2006-07-01 │ 0 │ 4 │ 13 │
│ 2006-10-01 │ 0 │ 23 │ 73 │
│ 2007-01-01 │ 0 │ 14 │ 91 │
│ 2007-04-01 │ 0 │ 10 │ 59 │
│ 2007-07-01 │ 0 │ 39 │ 116 │
│ 2007-10-01 │ 0 │ 45 │ 125 │
│ 2008-01-01 │ 0 │ 53 │ 234 │
│ 2008-04-01 │ 0 │ 79 │ 303 │
│ 2008-07-01 │ 0 │ 102 │ 174 │
│ 2008-10-01 │ 0 │ 156 │ 323 │
│ 2009-01-01 │ 0 │ 206 │ 208 │
│ 2009-04-01 │ 0 │ 178 │ 417 │
│ 2009-07-01 │ 0 │ 300 │ 295 │
│ 2009-10-01 │ 0 │ 633 │ 589 │
│ 2010-01-01 │ 0 │ 555 │ 501 │
│ 2010-04-01 │ 0 │ 587 │ 469 │
│ 2010-07-01 │ 0 │ 770 │ 821 │
│ 2010-10-01 │ 0 │ 1480 │ 550 │
│ 2011-01-01 │ 0 │ 1482 │ 568 │
│ 2011-04-01 │ 0 │ 1558 │ 406 │
│ 2011-07-01 │ 0 │ 2163 │ 628 │
│ 2011-10-01 │ 0 │ 4064 │ 566 │
│ 2012-01-01 │ 0 │ 4621 │ 662 │
│ 2012-04-01 │ 0 │ 5737 │ 785 │
│ 2012-07-01 │ 0 │ 6097 │ 1127 │
│ 2012-10-01 │ 0 │ 7986 │ 600 │
│ 2013-01-01 │ 0 │ 9704 │ 839 │
│ 2013-04-01 │ 0 │ 8161 │ 853 │
│ 2013-07-01 │ 0 │ 9704 │ 1028 │
│ 2013-10-01 │ 0 │ 12879 │ 1404 │
│ 2014-01-01 │ 0 │ 12317 │ 1548 │
│ 2014-04-01 │ 0 │ 13181 │ 1577 │
│ 2014-07-01 │ 0 │ 15640 │ 1710 │
│ 2014-10-01 │ 0 │ 19479 │ 1959 │
│ 2015-01-01 │ 0 │ 20411 │ 2104 │
│ 2015-04-01 │ 1 │ 20309 │ 9112 │
│ 2015-07-01 │ 0 │ 20325 │ 4771 │
│ 2015-10-01 │ 0 │ 25087 │ 3030 │
│ 2016-01-01 │ 0 │ 23462 │ 3126 │
│ 2016-04-01 │ 3 │ 25496 │ 2757 │
│ 2016-07-01 │ 4 │ 28233 │ 2928 │
│ 2016-10-01 │ 2 │ 45445 │ 2449 │
│ 2017-01-01 │ 9 │ 76019 │ 2808 │
│ 2017-04-01 │ 9 │ 67919 │ 2803 │
│ 2017-07-01 │ 13 │ 68974 │ 2771 │
│ 2017-10-01 │ 12 │ 69730 │ 2906 │
│ 2018-01-01 │ 17 │ 67476 │ 3152 │
│ 2018-04-01 │ 3 │ 67139 │ 3986 │
│ 2018-07-01 │ 14 │ 67979 │ 3609 │
│ 2018-10-01 │ 28 │ 74147 │ 3850 │
│ 2019-01-01 │ 14 │ 80250 │ 4305 │
│ 2019-04-01 │ 30 │ 70307 │ 3872 │
│ 2019-07-01 │ 33 │ 77149 │ 4164 │
│ 2019-10-01 │ 13 │ 76746 │ 3541 │
│ 2020-01-01 │ 16 │ 54475 │ 846 │
└────────────┴────────────┴───────────┴──────────┘

58 rows in set. Elapsed: 2663.751 sec. Processed 6.74 billion rows, 1.21 TB (2.53 million rows/s., 454.37 MB/s.)
```

@@ -181,7 +181,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--queries-file` – file path with queries to execute. You must specify either `query` or `queries-file` option.
 - `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
 - `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
-- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.
+- `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
 - `--format, -f` – Use the specified default format to output the result.
 - `--vertical, -E` – If specified, use the [Vertical format](../interfaces/formats.md#vertical) by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
 - `--time, -t` – If specified, print the query execution time to ‘stderr’ in non-interactive mode.

@ -2,34 +2,115 @@

slug: /en/operations/named-collections
sidebar_position: 69
sidebar_label: "Named collections"
title: "Named collections"
---

# Storing details for connecting to external sources in configuration files

Details for connecting to external sources (dictionaries, tables, table functions) can be saved
in configuration files and thus simplify the creation of objects and hide credentials
from users with only SQL access.

Parameters can be set in XML `<format>CSV</format>` and overridden in SQL `, format = 'TSV'`.
The parameters in SQL can be overridden using format `key` = `value`: `compression_method = 'gzip'`.

Named collections are stored in the `config.xml` file of the ClickHouse server in the `<named_collections>` section and are applied when ClickHouse starts.

Example of configuration:

Named collections provide a way to store collections of key-value pairs to be
used to configure integrations with external sources. You can use named collections with
dictionaries, tables, table functions, and object storage.

Named collections can be configured with DDL or in configuration files and are applied
when ClickHouse starts. They simplify the creation of objects and the hiding of credentials
from users without administrative access.

The keys in a named collection must match the parameter names of the corresponding
function, table engine, database, etc. In the examples below the parameter list is
linked to for each type.

Parameters set in a named collection can be overridden in SQL, this is shown in the examples
below.

## Storing named collections in the system database

### DDL example

```sql
CREATE NAMED COLLECTION name AS
key_1 = 'value',
key_2 = 'value2',
url = 'https://connection.url/'
```

### Permissions to create named collections with DDL

To manage named collections with DDL a user must have the `named_collection_control` privilege. This can be assigned by adding a file to `/etc/clickhouse-server/users.d/`. The example gives the user `default` both the `access_management` and `named_collection_control` privileges:

```xml title='/etc/clickhouse-server/users.d/user_default.xml'
<clickhouse>
  <users>
    <default>
      <password_sha256_hex replace=true>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
      <access_management>1</access_management>
      <!-- highlight-start -->
      <named_collection_control>1</named_collection_control>
      <!-- highlight-end -->
    </default>
  </users>
</clickhouse>
```

:::tip
In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` because the default configuration has a plain-text `password` set, and it is not possible to have both a plain-text and a sha256 hex password set for a user.
:::

## Storing named collections in configuration files

### XML example

```xml title='/etc/clickhouse-server/config.d/named_collections.xml'
<clickhouse>
  <named_collections>
    ...
    <name>
      <key_1>value</key_1>
      <key_2>value_2</key_2>
      <url>https://connection.url/</url>
    </name>
  </named_collections>
</clickhouse>
```

## Named collections for accessing S3.
## Modifying named collections

Named collections that are created with DDL queries can be altered or dropped with DDL. Named collections created with XML files can be managed by editing or deleting the corresponding XML.

### Alter a DDL named collection

Change or add the keys `key1` and `key3` of the collection `collection2`:

```sql
ALTER NAMED COLLECTION collection2 SET key1=4, key3='value3'
```

Remove the key `key2` from `collection2`:

```sql
ALTER NAMED COLLECTION collection2 DELETE key2
```

Change or add the key `key1` and delete the key `key3` of the collection `collection2`:

```sql
ALTER NAMED COLLECTION collection2 SET key1=4, DELETE key3
```

### Drop the DDL named collection `collection2`:

```sql
DROP NAMED COLLECTION collection2
```

## Named collections for accessing S3

For the description of the parameters see [s3 Table Function](../sql-reference/table-functions/s3.md).

Example of configuration:
### DDL example

```sql
CREATE NAMED COLLECTION s3_mydata AS
access_key_id = 'AKIAIOSFODNN7EXAMPLE',
secret_access_key = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
format = 'CSV',
url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/'
```

### XML example

```xml
<clickhouse>
  <named_collections>

@ -43,23 +124,23 @@ Example of configuration:

</clickhouse>
```

### Example of using named collections with the s3 function
### s3() function and S3 Table named collection examples

Both of the following examples use the same named collection `s3_mydata`:

#### s3() function

```sql
INSERT INTO FUNCTION s3(s3_mydata, filename = 'test_file.tsv.gz',
    format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
SELECT * FROM numbers(10000);

SELECT count()
FROM s3(s3_mydata, filename = 'test_file.tsv.gz')

┌─count()─┐
│   10000 │
└─────────┘
1 rows in set. Elapsed: 0.279 sec. Processed 10.00 thousand rows, 90.00 KB (35.78 thousand rows/s., 322.02 KB/s.)
```

### Example of using named collections with an S3 table
:::tip
The first argument to the `s3()` function above is the name of the collection, `s3_mydata`. Without named collections, the access key ID, secret, format, and URL would all be passed in every call to the `s3()` function.
:::

#### S3 table

```sql
CREATE TABLE s3_engine_table (number Int64)

@ -78,7 +159,22 @@ SELECT * FROM s3_engine_table LIMIT 3;

For the description of the parameters see [mysql](../sql-reference/table-functions/mysql.md).

Example of configuration:
### DDL example

```sql
CREATE NAMED COLLECTION mymysql AS
user = 'myuser',
password = 'mypass',
host = '127.0.0.1',
port = 3306,
database = 'test',
connection_pool_size = 8,
on_duplicate_clause = 1,
replace_query = 1
```

### XML example

```xml
<clickhouse>
  <named_collections>

@ -96,7 +192,11 @@ Example of configuration:

</clickhouse>
```

### Example of using named collections with the mysql function
### mysql() function, MySQL table, MySQL database, and Dictionary named collection examples

The four following examples use the same named collection `mymysql`:

#### mysql() function

```sql
SELECT count() FROM mysql(mymysql, table = 'test');

@ -105,8 +205,11 @@ SELECT count() FROM mysql(mymysql, table = 'test');

│       3 │
└─────────┘
```
:::note
The named collection does not specify the `table` parameter, so it is specified in the function call as `table = 'test'`.
:::

### Example of using named collections with an MySQL table
#### MySQL table

```sql
CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0);

@ -117,7 +220,11 @@ SELECT count() FROM mytable;

└─────────┘
```

### Example of using named collections with database with engine MySQL
:::note
The DDL overrides the named collection setting for connection_pool_size.
:::

#### MySQL database

```sql
CREATE DATABASE mydatabase ENGINE = MySQL(mymysql);

@ -130,7 +237,7 @@ SHOW TABLES FROM mydatabase;

└────────┘
```

### Example of using named collections with a dictionary with source MySQL
#### MySQL Dictionary

```sql
CREATE DICTIONARY dict (A Int64, B String)

@ -150,6 +257,17 @@ SELECT dictGet('dict', 'B', 2);

For the description of the parameters see [postgresql](../sql-reference/table-functions/postgresql.md).

```sql
CREATE NAMED COLLECTION mypg AS
user = 'pguser',
password = 'jw8s0F4',
host = '127.0.0.1',
port = 5432,
database = 'test',
schema = 'test_schema',
connection_pool_size = 8
```

Example of configuration:
```xml
<clickhouse>

@ -229,12 +347,22 @@ SELECT dictGet('dict', 'b', 2);

└─────────────────────────┘
```

## Named collections for accessing remote ClickHouse database
## Named collections for accessing a remote ClickHouse database

For the description of the parameters see [remote](../sql-reference/table-functions/remote.md/#parameters).

Example of configuration:

```sql
CREATE NAMED COLLECTION remote1 AS
host = 'remote_host',
port = 9000,
database = 'system',
user = 'foo',
password = 'secret',
secure = 1
```

```xml
<clickhouse>
  <named_collections>

@ -286,3 +414,4 @@ SELECT dictGet('dict', 'b', 1);

│ a                       │
└─────────────────────────┘
```
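
For illustration only, a collection like `remote1` can then be referenced from the `remote` table function. The sketch below assumes the collection defined above and a reachable remote host, and overrides only the table name:

```sql
-- reads system.one on the remote host using credentials stored in the named collection
SELECT * FROM remote(remote1, table = 'one');
```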

@ -452,6 +452,8 @@ Possible values:

The first phase of a grace join reads the right table and splits it into N buckets depending on the hash value of key columns (initially, N is `grace_hash_join_initial_buckets`). This is done in a way to ensure that each bucket can be processed independently. Rows from the first bucket are added to an in-memory hash table while the others are saved to disk. If the hash table grows beyond the memory limit (e.g., as set by [`max_bytes_in_join`](/docs/en/operations/settings/query-complexity.md/#settings-max_bytes_in_join)), the number of buckets is increased and the assigned bucket for each row is recalculated. Any rows which don’t belong to the current bucket are flushed and reassigned.

Supports `INNER/LEFT/RIGHT/FULL ALL/ANY JOIN`.
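
As a sketch of how this algorithm is selected for a session (the table names are illustrative, not from this document):

```sql
-- choose the grace hash join and the initial number of buckets
SET join_algorithm = 'grace_hash';
SET grace_hash_join_initial_buckets = 16;

SELECT l.key, r.value
FROM left_table AS l
INNER JOIN right_table AS r ON l.key = r.key;
```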

- hash

[Hash join algorithm](https://en.wikipedia.org/wiki/Hash_join) is used. The most generic implementation that supports all combinations of kind and strictness and multiple join keys that are combined with `OR` in the `JOIN ON` section.

@ -1377,6 +1379,12 @@ Possible values:

Default value: `default`.

## allow_experimental_parallel_reading_from_replicas

If true, ClickHouse will send a SELECT query to all replicas of a table (up to `max_parallel_replicas`). It will work for any kind of MergeTree table.

Default value: `false`.
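
A minimal sketch of enabling the setting for one session (the table name is illustrative):

```sql
SET allow_experimental_parallel_reading_from_replicas = 1;
SET max_parallel_replicas = 3;

-- the SELECT is now fanned out to up to 3 replicas of the table
SELECT count() FROM events;
```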

## compile_expressions {#compile-expressions}

Enables or disables compilation of frequently used simple functions and operators to native code with LLVM at runtime.

@ -1708,7 +1716,7 @@ Default value: `100000`.

### async_insert_max_query_number {#async-insert-max-query-number}

The maximum number of insert queries per block before being inserted. This setting takes effect only if [async_insert_deduplicate](#settings-async-insert-deduplicate) is enabled.
The maximum number of insert queries per block before being inserted. This setting takes effect only if [async_insert_deduplicate](#async-insert-deduplicate) is enabled.

Possible values:

@ -1739,7 +1747,7 @@ Possible values:

Default value: `0`.

### async_insert_deduplicate {#settings-async-insert-deduplicate}
### async_insert_deduplicate {#async-insert-deduplicate}

Enables or disables insert deduplication of `ASYNC INSERT` (for Replicated\* tables).
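
A sketch of how this setting is typically combined with asynchronous inserts (the table and column names are illustrative):

```sql
SET async_insert = 1;
SET async_insert_deduplicate = 1;

-- repeated identical async inserts into the Replicated* table are deduplicated
INSERT INTO replicated_events (ts, id) VALUES (now(), 1);
```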

@ -3213,17 +3221,6 @@ Possible values:

Default value: `0`.

## allow_experimental_geo_types {#allow-experimental-geo-types}

Allows working with experimental [geo data types](../../sql-reference/data-types/geo.md).

Possible values:

- 0 — Working with geo data types is disabled.
- 1 — Working with geo data types is enabled.

Default value: `0`.

## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}

Adds a modifier `SYNC` to all `DROP` and `DETACH` queries.
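
For example, with the setting enabled a plain `DROP` behaves as if `SYNC` had been written (a sketch; the table name is illustrative):

```sql
SET database_atomic_wait_for_drop_and_detach_synchronously = 1;
DROP TABLE IF EXISTS my_table; -- now waits for completion, like `DROP TABLE my_table SYNC`
```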

@ -97,8 +97,8 @@ Columns:

- `forwarded_for` ([String](../../sql-reference/data-types/string.md)) — HTTP header `X-Forwarded-For` passed in the HTTP query.
- `quota_key` ([String](../../sql-reference/data-types/string.md)) — The `quota key` specified in the [quotas](../../operations/quotas.md) setting (see `keyed`).
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/array.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
- `Settings` ([Map(String, String)](../../sql-reference/data-types/array.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution.
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
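
The two `Map` columns above can be accessed by key. A sketch of such a query, assuming the `SelectedRows` profile event and the `max_threads` setting were recorded for the logged queries:

```sql
SELECT
    query,
    ProfileEvents['SelectedRows'] AS selected_rows,
    Settings['max_threads']       AS max_threads
FROM system.query_log
WHERE type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 5;
```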

29 docs/en/operations/system-tables/zookeeper_connection.md Normal file
@ -0,0 +1,29 @@

---
slug: /en/operations/system-tables/zookeeper_connection
---
# zookeeper_connection

This table does not exist if ZooKeeper is not configured. The 'system.zookeeper_connection' table shows current connections to ZooKeeper (including auxiliary ZooKeepers). Each row shows information about one connection.

Columns:

- `name` ([String](../../sql-reference/data-types/string.md)) — ZooKeeper cluster's name.
- `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
- `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
- `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established.
- `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
- `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
- `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.

Example:

``` sql
SELECT * FROM system.zookeeper_connection;
```

``` text
┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
│ default_zookeeper │ 127.0.0.1    │ 2181 │     0 │ 2023-05-19 14:30:16 │          0 │                  0 │ 216349144108826660 │
└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
```

@ -188,6 +188,7 @@ Arguments:

- `-N`, `--table` — table name where to put output data, `table` by default.
- `--format`, `--output-format` — output format, `TSV` by default.
- `-d`, `--database` — default database, `_local` by default.
- `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
- `--stacktrace` — whether to dump debug output in case of exception.
- `--echo` — print query before execution.
- `--verbose` — more details on query execution.

@ -29,5 +29,5 @@ ClickHouse data types include:

- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column getting its default value for the data type)
- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses
- **Geo types**: for[ geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
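
As a sketch, a single table can combine several of the types listed above (the table and column names are illustrative):

```sql
CREATE TABLE datatype_demo
(
    id UInt64,
    tags Array(String),
    score Nullable(Float64),
    client_ip IPv4,
    location Point
)
ENGINE = MergeTree
ORDER BY id;
```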

@ -267,14 +267,16 @@ or

LAYOUT(HASHED())
```

If `shards` greater then 1 (default is `1`) the dictionary will load data in parallel, useful if you have huge amount of elements in one dictionary.

Configuration example:

``` xml
<layout>
  <hashed>
    <!-- If shards greater then 1 (default is `1`) the dictionary will load
         data in parallel, useful if you have huge amount of elements in one
         dictionary. -->
    <shards>10</shards>

    <!-- Size of the backlog for blocks in parallel queue.

         Since the bottleneck in parallel loading is rehash, and so to avoid

@ -284,6 +286,14 @@ Configuration example:

         10000 is good balance between memory and speed.
         Even for 10e10 elements and can handle all the load without starvation. -->
    <shard_load_queue_backlog>10000</shard_load_queue_backlog>

    <!-- Maximum load factor of the hash table, with greater values, the memory
         is utilized more efficiently (less memory is wasted) but read/performance
         may deteriorate.

         Valid values: [0.5, 0.99]
         Default: 0.5 -->
    <max_load_factor>0.5</max_load_factor>
  </hashed>
</layout>
```

@ -291,7 +301,7 @@ Configuration example:

or

``` sql
LAYOUT(HASHED(SHARDS 10 [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```

### sparse_hashed

@ -304,14 +314,18 @@ Configuration example:

``` xml
<layout>
  <sparse_hashed />
  <sparse_hashed>
    <!-- <shards>1</shards> -->
    <!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
    <!-- <max_load_factor>0.5</max_load_factor> -->
  </sparse_hashed>
</layout>
```

or

``` sql
LAYOUT(SPARSE_HASHED())
LAYOUT(SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```

It is also possible to use `shards` for this type of dictionary, and again it is more important for `sparse_hashed` than for `hashed`, since `sparse_hashed` is slower.

@ -325,8 +339,9 @@ Configuration example:

``` xml
<layout>
  <complex_key_hashed>
    <shards>1</shards>
    <!-- <shards>1</shards> -->
    <!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
    <!-- <max_load_factor>0.5</max_load_factor> -->
  </complex_key_hashed>
</layout>
```

@ -334,7 +349,7 @@ Configuration example:

or

``` sql
LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(COMPLEX_KEY_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```

### complex_key_sparse_hashed

@ -346,7 +361,9 @@ Configuration example:

``` xml
<layout>
  <complex_key_sparse_hashed>
    <shards>1</shards>
    <!-- <shards>1</shards> -->
    <!-- <shard_load_queue_backlog>10000</shard_load_queue_backlog> -->
    <!-- <max_load_factor>0.5</max_load_factor> -->
  </complex_key_sparse_hashed>
</layout>
```

@ -354,7 +371,7 @@ Configuration example:

or

``` sql
LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000]))
LAYOUT(COMPLEX_KEY_SPARSE_HASHED([SHARDS 1] [SHARD_LOAD_QUEUE_BACKLOG 10000] [MAX_LOAD_FACTOR 0.5]))
```

### hashed_array

@ -2197,16 +2214,16 @@ Result:

└─────────────────────────────────┴───────┘
```

## RegExp Tree Dictionary {#regexp-tree-dictionary}
## Regular Expression Tree Dictionary {#regexp-tree-dictionary}

Regexp Tree dictionary stores multiple trees of regular expressions with attributions. Users can retrieve strings in the dictionary. If a string matches the root of the regexp tree, we will collect the corresponding attributes of the matched root and continue to walk the children. If any of the children matches the string, we will collect attributes and rewrite the old ones if conflicts occur, then continue the traverse until we reach leaf nodes.
Regular expression tree dictionaries are a special type of dictionary which represent the mapping from key to attributes using a tree of regular expressions. There are some use cases, e.g. parsing of [user agent](https://en.wikipedia.org/wiki/User_agent) strings, which can be expressed elegantly with regexp tree dictionaries.

Example of the ddl query for creating Regexp Tree dictionary:
### Use Regular Expression Tree Dictionary in ClickHouse Open-Source

<CloudDetails />
Regular expression tree dictionaries are defined in ClickHouse open-source using the YAMLRegExpTree source which is provided the path to a YAML file containing the regular expression tree.

```sql
create dictionary regexp_dict
CREATE DICTIONARY regexp_dict
(
    regexp String,
    name String,

@ -2218,17 +2235,15 @@ LAYOUT(regexp_tree)

...
```

**Source**
The dictionary source `YAMLRegExpTree` represents the structure of a regexp tree. For example:

We introduce a type of source called `YAMLRegExpTree` representing the structure of Regexp Tree dictionary. An Example of a valid yaml config is like:
```yaml
- regexp: 'Linux/(\d+[\.\d]*).+tlinux'
  name: 'TencentOS'
  version: '\1'

- regexp: '\d+/tclwebkit(?:\d+[\.\d]*)'
  name: 'Andriod'
  name: 'Android'
  versions:
    - regexp: '33/tclwebkit'
      version: '13'

@ -2240,17 +2255,14 @@ We introduce a type of source called `YAMLRegExpTree` representing the structure

      version: '10'
```

The key `regexp` represents the regular expression of a tree node. The name of key is same as the dictionary key. The `name` and `version` is user-defined attributions in the dicitionary. The `versions` (which can be any name that not appear in attributions or the key) indicates the children nodes of this tree.

**Back Reference**

The value of an attribution could contain a back reference which refers to a capture group of the matched regular expression. Reference number ranges from 1 to 9 and writes as `$1` or `\1`.

During the query execution, the back reference in the value will be replaced by the matched capture group.

**Query**

Due to the specialty of Regexp Tree dictionary, we only allow functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull` work with it.

This config consists of a list of regular expression tree nodes. Each node has the following structure:

- **regexp**: the regular expression of the node.
- **attributes**: a list of user-defined dictionary attributes. In this example, there are two attributes: `name` and `version`. The first node defines both attributes. The second node only defines attribute `name`. Attribute `version` is provided by the child nodes of the second node.
- The value of an attribute may contain **back references**, referring to capture groups of the matched regular expression. In the example, the value of attribute `version` in the first node consists of a back-reference `\1` to capture group `(\d+[\.\d]*)` in the regular expression. Back-reference numbers range from 1 to 9 and are written as `$1` or `\1` (for number 1). The back reference is replaced by the matched capture group during query execution.
- **child nodes**: a list of children of a regexp tree node, each of which has its own attributes and (potentially) children nodes. String matching proceeds in a depth-first fashion. If a string matches a regexp node, the dictionary checks if it also matches the nodes' child nodes. If that is the case, the attributes of the deepest matching node are assigned. Attributes of a child node overwrite equally named attributes of parent nodes. The name of child nodes in YAML files can be arbitrary, e.g. `versions` in above example.

Regexp tree dictionaries only allow access using functions `dictGet`, `dictGetOrDefault` and `dictGetOrNull`.

Example:

@ -2260,12 +2272,83 @@ SELECT dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024');

Result:

```text
┌─dictGet('regexp_dict', ('name', 'version'), '31/tclwebkit1024')─┐
│ ('Andriod','12')                                                │
│ ('Android','12')                                                │
└─────────────────────────────────────────────────────────────────┘
```

In this case, we first match the regular expression `\d+/tclwebkit(?:\d+[\.\d]*)` in the top layer's second node. The dictionary then continues to look into the child nodes and finds that the string also matches `3[12]/tclwebkit`. As a result, the value of attribute `name` is `Android` (defined in the first layer) and the value of attribute `version` is `12` (defined in the child node).

With a powerful YAML configuration file, we can use a regexp tree dictionary as a user agent string parser. We support [uap-core](https://github.com/ua-parser/uap-core) and demonstrate how to use it in the functional test [02504_regexp_dictionary_ua_parser](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/02504_regexp_dictionary_ua_parser.sh).

### Use Regular Expression Tree Dictionary in ClickHouse Cloud

The `YAMLRegExpTree` source used above works in ClickHouse Open Source but not in ClickHouse Cloud. To use regexp tree dictionaries in ClickHouse Cloud, first create a regexp tree dictionary from a YAML file locally in ClickHouse Open Source, then dump this dictionary into a CSV file using the `dictionary` table function and the [INTO OUTFILE](../statements/select/into-outfile.md) clause.

```sql
SELECT * FROM dictionary(regexp_dict) INTO OUTFILE('regexp_dict.csv')
```

The content of csv file is:

```text
1,0,"Linux/(\d+[\.\d]*).+tlinux","['version','name']","['\\1','TencentOS']"
2,0,"(\d+)/tclwebkit(\d+[\.\d]*)","['comment','version','name']","['test $1 and $2','$1','Android']"
3,2,"33/tclwebkit","['version']","['13']"
4,2,"3[12]/tclwebkit","['version']","['12']"
5,2,"3[12]/tclwebkit","['version']","['11']"
6,2,"3[12]/tclwebkit","['version']","['10']"
```

The schema of dumped file is:

- `id UInt64`: the id of the RegexpTree node.
- `parent_id UInt64`: the id of the parent of a node.
- `regexp String`: the regular expression string.
- `keys Array(String)`: the names of user-defined attributes.
- `values Array(String)`: the values of user-defined attributes.

To create the dictionary in ClickHouse Cloud, first create a table `regexp_dictionary_source_table` with below table structure:

```sql
CREATE TABLE regexp_dictionary_source_table
(
    id UInt64,
    parent_id UInt64,
    regexp String,
    keys Array(String),
    values Array(String)
) ENGINE=Memory;
```

Then update the local CSV by

```bash
clickhouse client \
    --host MY_HOST \
    --secure \
    --password MY_PASSWORD \
    --query "
    INSERT INTO regexp_dictionary_source_table
    SELECT * FROM input ('id UInt64, parent_id UInt64, regexp String, keys Array(String), values Array(String)')
    FORMAT CSV" < regexp_dict.csv
```

You can see how to [Insert Local Files](https://clickhouse.com/docs/en/integrations/data-ingestion/insert-local-files) for more details. After we initialize the source table, we can create a RegexpTree by table source:

``` sql
CREATE DICTIONARY regexp_dict
(
    regexp String,
    name String,
    version String
)
PRIMARY KEY(regexp)
SOURCE(CLICKHOUSE(TABLE 'regexp_dictionary_source_table'))
LIFETIME(0)
LAYOUT(regexp_tree);
```

## Embedded Dictionaries {#embedded-dictionaries}

<SelfManaged />

@ -357,14 +357,14 @@ Alias: `SECOND`.

## toUnixTimestamp

For DateTime arguments: converts the value to the number with type UInt32 -- Unix Timestamp (https://en.wikipedia.org/wiki/Unix_time).
Converts a string, a date or a date with time to the [Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time) in `UInt32` representation.

For String argument: converts the input string to the datetime according to the timezone (optional second argument, server timezone is used by default) and returns the corresponding unix timestamp.
If the function is called with a string, it accepts an optional timezone argument.

**Syntax**

``` sql
toUnixTimestamp(datetime)
toUnixTimestamp(date)
toUnixTimestamp(str, [timezone])
```

@ -377,15 +377,29 @@ Type: `UInt32`.

**Example**

``` sql
SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
SELECT
    '2017-11-05 08:07:47' AS dt_str,
    toUnixTimestamp(dt_str) AS from_str,
    toUnixTimestamp(dt_str, 'Asia/Tokyo') AS from_str_tokyo,
    toUnixTimestamp(toDateTime(dt_str)) AS from_datetime,
    toUnixTimestamp(toDateTime64(dt_str, 0)) AS from_datetime64,
    toUnixTimestamp(toDate(dt_str)) AS from_date,
    toUnixTimestamp(toDate32(dt_str)) AS from_date32
FORMAT Vertical;
```

Result:

``` text
┌─unix_timestamp─┐
│     1509836867 │
└────────────────┘
Row 1:
──────
dt_str:          2017-11-05 08:07:47
from_str:        1509869267
from_str_tokyo:  1509836867
from_datetime:   1509869267
from_datetime64: 1509869267
from_date:       1509840000
from_date32:     1509840000
```

:::note

@ -12,18 +12,18 @@ This is an experimental feature that is currently in development and is not read

Performs stemming on a given word.

**Syntax**
### Syntax

``` sql
stem('language', word)
```

**Arguments**
### Arguments

- `language` — Language which rules will be applied. Must be in lowercase. [String](../../sql-reference/data-types/string.md#string).
- `language` — Language which rules will be applied. Use the two letter [ISO 639-1 code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes).
- `word` — word that needs to be stemmed. Must be in lowercase. [String](../../sql-reference/data-types/string.md#string).

**Examples**
### Examples

Query:

@ -38,23 +38,58 @@ Result:

│ ['I','think','it','is','a','bless','in','disguis'] │
└────────────────────────────────────────────────────┘
```
### Supported languages for stem()

:::note
The stem() function uses the [Snowball stemming](https://snowballstem.org/) library, see the Snowball website for updated languages etc.
:::

- Arabic
- Armenian
- Basque
- Catalan
- Danish
- Dutch
- English
- Finnish
- French
- German
- Greek
- Hindi
- Hungarian
- Indonesian
- Irish
- Italian
- Lithuanian
- Nepali
- Norwegian
- Porter
- Portuguese
- Romanian
- Russian
- Serbian
- Spanish
- Swedish
- Tamil
- Turkish
- Yiddish

## lemmatize

Performs lemmatization on a given word. Needs dictionaries to operate, which can be obtained [here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).

**Syntax**
### Syntax

``` sql
lemmatize('language', word)
```

**Arguments**
### Arguments

- `language` — Language which rules will be applied. [String](../../sql-reference/data-types/string.md#string).
- `word` — Word that needs to be lemmatized. Must be lowercase. [String](../../sql-reference/data-types/string.md#string).

**Examples**
### Examples

Query:

@ -70,12 +105,18 @@ Result:

└─────────────────────┘
```

Configuration:
### Configuration

This configuration specifies that the dictionary `en.bin` should be used for lemmatization of English (`en`) words. The `.bin` files can be downloaded from
[here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).

``` xml
<lemmatizers>
    <lemmatizer>
        <!-- highlight-start -->
        <lang>en</lang>
        <path>en.bin</path>
        <!-- highlight-end -->
    </lemmatizer>
</lemmatizers>
```

@ -88,18 +129,18 @@ With the `plain` extension type we need to provide a path to a simple text file,

With the `wordnet` extension type we need to provide a path to a directory with WordNet thesaurus in it. Thesaurus must contain a WordNet sense index.

**Syntax**
### Syntax

``` sql
synonyms('extension_name', word)
```

**Arguments**
### Arguments

- `extension_name` — Name of the extension in which search will be performed. [String](../../sql-reference/data-types/string.md#string).
- `word` — Word that will be searched in extension. [String](../../sql-reference/data-types/string.md#string).

**Examples**
### Examples

Query:
|
|||||||
└──────────────────────────────────────────┘
|
└──────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Configuration:
|
### Configuration
|
||||||
``` xml
|
``` xml
|
||||||
<synonyms_extensions>
|
<synonyms_extensions>
|
||||||
<extension>
|
<extension>
|
||||||
@ -137,17 +178,17 @@ Detects the language of the UTF8-encoded input string. The function uses the [CL
|
|||||||
|
|
||||||
The `detectLanguage` function works best when providing over 200 characters in the input string.
|
The `detectLanguage` function works best when providing over 200 characters in the input string.
|
||||||
|
|
||||||
**Syntax**
|
### Syntax
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
detectLanguage('text_to_be_analyzed')
|
detectLanguage('text_to_be_analyzed')
|
||||||
```
|
```
|
||||||
|
|
||||||
**Arguments**
|
### Arguments
|
||||||
|
|
||||||
- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).
|
- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).
|
||||||
|
|
||||||
**Returned value**
|
### Returned value
|
||||||
|
|
||||||
- The 2-letter ISO code of the detected language
|
- The 2-letter ISO code of the detected language
|
||||||
|
|
||||||

@ -156,7 +197,7 @@ Other possible results:

- `un` = unknown, can not detect any language.
- `other` = the detected language does not have 2 letter code.

**Examples**
### Examples

Query:

@ -175,22 +216,22 @@ fr

Similar to the `detectLanguage` function, but `detectLanguageMixed` returns a `Map` of 2-letter language codes that are mapped to the percentage of the certain language in the text.

**Syntax**
### Syntax

``` sql
detectLanguageMixed('text_to_be_analyzed')
```

**Arguments**
### Arguments

- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).

**Returned value**
### Returned value

- `Map(String, Float32)`: The keys are 2-letter ISO codes and the values are a percentage of text found for that language

**Examples**
### Examples

Query:

@ -211,17 +252,17 @@ Result:

Similar to the `detectLanguage` function, except the `detectLanguageUnknown` function works with non-UTF8-encoded strings. Prefer this version when your character set is UTF-16 or UTF-32.

**Syntax**
### Syntax

``` sql
detectLanguageUnknown('text_to_be_analyzed')
```

**Arguments**
### Arguments

- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).

**Returned value**
### Returned value

- The 2-letter ISO code of the detected language

@ -230,7 +271,7 @@ Other possible results:

- `un` = unknown, can not detect any language.
- `other` = the detected language does not have 2 letter code.

**Examples**
### Examples

Query:
|
|||||||
The `detectCharset` function detects the character set of the non-UTF8-encoded input string.
|
The `detectCharset` function detects the character set of the non-UTF8-encoded input string.
|
||||||
|
|
||||||
|
|
||||||
**Syntax**
|
### Syntax
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
detectCharset('text_to_be_analyzed')
|
detectCharset('text_to_be_analyzed')
|
||||||
```
|
```
|
||||||
|
|
||||||
**Arguments**
|
### Arguments
|
||||||
|
|
||||||
- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).
|
- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../../sql-reference/data-types/string.md#string).
|
||||||
|
|
||||||
**Returned value**
|
### Returned value
|
||||||
|
|
||||||
- A `String` containing the code of the detected character set
|
- A `String` containing the code of the detected character set
|
||||||
|
|
||||||
**Examples**
|
### Examples
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
|

@ -284,13 +284,17 @@ Manipulates data in the specifies partition matching the specified filtering exp

Syntax:

``` sql
ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] [IN PARTITION partition_id] WHERE filter_expr
ALTER TABLE [db.]table [ON CLUSTER cluster] UPDATE column1 = expr1 [, ...] [IN PARTITION partition_expr] WHERE filter_expr
```

### Example

``` sql
-- using partition name
ALTER TABLE mt UPDATE x = x + 1 IN PARTITION 2 WHERE p = 2;

-- using partition id
ALTER TABLE mt UPDATE x = x + 1 IN PARTITION ID '2' WHERE p = 2;
```

### See Also

@ -304,13 +308,17 @@ Deletes data in the specifies partition matching the specified filtering express

Syntax:

``` sql
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE [IN PARTITION partition_id] WHERE filter_expr
ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE [IN PARTITION partition_expr] WHERE filter_expr
```

### Example

``` sql
-- using partition name
ALTER TABLE mt DELETE IN PARTITION 2 WHERE p = 2;

-- using partition id
ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;
```

### See Also

@ -9,7 +9,7 @@ sidebar_label: GRANT

- Grants [privileges](#grant-privileges) to ClickHouse user accounts or roles.
- Assigns roles to user accounts or to the other roles.

To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants-statement) statement.
To revoke privileges, use the [REVOKE](../../sql-reference/statements/revoke.md) statement. Also you can list granted privileges with the [SHOW GRANTS](../../sql-reference/statements/show.md#show-grants) statement.
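
For example, granting, listing and then revoking a privilege looks like this (the database and user names are illustrative):

```sql
GRANT SELECT ON mydb.* TO alice;
SHOW GRANTS FOR alice;
REVOKE SELECT ON mydb.* FROM alice;
```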

## Granting Privilege Syntax

@ -47,3 +47,12 @@ SELECT * FROM test_table;

Patterns in curly brackets `{ }` are used to generate a set of shards or to specify failover addresses. Supported pattern types and examples see in the description of the [remote](remote.md#globs-in-addresses) function.
Character `|` inside patterns is used to specify failover addresses. They are iterated in the same order as listed in the pattern. The number of generated addresses is limited by [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements) setting.

## Virtual Columns

- `_path` — Path to the `URL`.
- `_file` — Resource name of the `URL`.
|
**See Also**
|
||||||
|
|
||||||
|
- [Virtual columns](/docs/en/engines/table-engines/index.md#table_engines-virtual_columns)
|
||||||
|
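A sketch combining the documented failover pattern with the newly documented virtual columns — the host names are invented, and the column list reuses the `column1 String, column2 UInt32` schema from the earlier example in this file:

``` sql
SELECT _path, _file, column1, column2
FROM url('http://{primary.example.com|backup.example.com}:8123/data.csv', 'CSV', 'column1 String, column2 UInt32')
LIMIT 10;
```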
@@ -132,7 +132,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
 - `--queries-file` - путь к файлу с запросами для выполнения. Необходимо указать только одну из опций: `query` или `queries-file`.
 - `--database, -d` — выбрать текущую БД. Без указания значение берется из настроек сервера (по умолчанию — БД ‘default’).
 - `--multiline, -m` — если указано — разрешить многострочные запросы, не отправлять запрос по нажатию Enter.
-- `--multiquery, -n` — если указано — разрешить выполнять несколько запросов, разделённых точкой с запятой.
+- `--multiquery, -n` — Если указано, то после опции `--query` могут быть перечислены несколько запросов, разделенных точкой с запятой. Для удобства можно также опустить `--query` и передавать запросы непосредственно после `--multiquery`.
 - `--format, -f` — использовать указанный формат по умолчанию для вывода результата.
 - `--vertical, -E` — если указано, использовать по умолчанию формат [Vertical](../interfaces/formats.md#vertical) для вывода результата. То же самое, что `–format=Vertical`. В этом формате каждое значение выводится на отдельной строке, что удобно для отображения широких таблиц.
 - `--time, -t` — если указано, в неинтерактивном режиме вывести время выполнения запроса в поток ‘stderr’.
@@ -3185,16 +3185,6 @@ SELECT * FROM test2;

 Значение по умолчанию: `0`.

-## allow_experimental_geo_types {#allow-experimental-geo-types}
-
-Разрешает использование экспериментальных типов данных для работы с [географическими структурами](../../sql-reference/data-types/geo.md).
-
-Возможные значения:
-- 0 — использование типов данных для работы с географическими структурами не поддерживается.
-- 1 — использование типов данных для работы с географическими структурами поддерживается.
-
-Значение по умолчанию: `0`.
-
 ## database_atomic_wait_for_drop_and_detach_synchronously {#database_atomic_wait_for_drop_and_detach_synchronously}

 Добавляет модификатор `SYNC` ко всем запросам `DROP` и `DETACH`.
@@ -8,13 +8,8 @@ sidebar_label: Географические структуры

 ClickHouse поддерживает типы данных для отображения географических объектов — точек (местоположений), территорий и т.п.

-:::danger "Предупреждение"
-Сейчас использование типов данных для работы с географическими структурами является экспериментальной возможностью. Чтобы использовать эти типы данных, включите настройку `allow_experimental_geo_types = 1`.
-:::
-
 **См. также**
 - [Хранение географических структур данных](https://ru.wikipedia.org/wiki/GeoJSON).
-- Настройка [allow_experimental_geo_types](../../operations/settings/settings.md#allow-experimental-geo-types).

 ## Point {#point-data-type}

@@ -25,7 +20,6 @@ ClickHouse поддерживает типы данных для отображ
 Запрос:

 ```sql
-SET allow_experimental_geo_types = 1;
 CREATE TABLE geo_point (p Point) ENGINE = Memory();
 INSERT INTO geo_point VALUES((10, 10));
 SELECT p, toTypeName(p) FROM geo_point;
@@ -47,7 +41,6 @@ SELECT p, toTypeName(p) FROM geo_point;
 Запрос:

 ```sql
-SET allow_experimental_geo_types = 1;
 CREATE TABLE geo_ring (r Ring) ENGINE = Memory();
 INSERT INTO geo_ring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
 SELECT r, toTypeName(r) FROM geo_ring;
@@ -69,7 +62,6 @@ SELECT r, toTypeName(r) FROM geo_ring;
 Запись в этой таблице описывает многоугольник с одной дырой:

 ```sql
-SET allow_experimental_geo_types = 1;
 CREATE TABLE geo_polygon (pg Polygon) ENGINE = Memory();
 INSERT INTO geo_polygon VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]);
 SELECT pg, toTypeName(pg) FROM geo_polygon;
@@ -92,7 +84,6 @@ SELECT pg, toTypeName(pg) FROM geo_polygon;
 Запись в этой таблице описывает элемент, состоящий из двух многоугольников — первый без дыр, а второй с одной дырой:

 ```sql
-SET allow_experimental_geo_types = 1;
 CREATE TABLE geo_multipolygon (mpg MultiPolygon) ENGINE = Memory();
 INSERT INTO geo_multipolygon VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]);
 SELECT mpg, toTypeName(mpg) FROM geo_multipolygon;
@@ -235,13 +235,13 @@ SELECT toDateTime('2021-04-21 10:20:30', 'Europe/Moscow') AS Time, toTypeName(Ti

 ## toUnixTimestamp {#to-unix-timestamp}

-Переводит дату-с-временем в число типа UInt32 -- Unix Timestamp (https://en.wikipedia.org/wiki/Unix_time).
-Для аргумента String, строка конвертируется в дату и время в соответствии с часовым поясом (необязательный второй аргумент, часовой пояс сервера используется по умолчанию).
+Переводит строку, дату или дату-с-временем в [Unix Timestamp](https://en.wikipedia.org/wiki/Unix_time), имеющий тип `UInt32`.
+Строка может сопровождаться вторым (необязательным) аргументом, указывающим часовой пояс.

 **Синтаксис**

 ``` sql
-toUnixTimestamp(datetime)
+toUnixTimestamp(date)
 toUnixTimestamp(str, [timezone])
 ```

@@ -256,15 +256,29 @@ toUnixTimestamp(str, [timezone])
 Запрос:

 ``` sql
-SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
+SELECT
+    '2017-11-05 08:07:47' AS dt_str,
+    toUnixTimestamp(dt_str) AS from_str,
+    toUnixTimestamp(dt_str, 'Asia/Tokyo') AS from_str_tokyo,
+    toUnixTimestamp(toDateTime(dt_str)) AS from_datetime,
+    toUnixTimestamp(toDateTime64(dt_str, 0)) AS from_datetime64,
+    toUnixTimestamp(toDate(dt_str)) AS from_date,
+    toUnixTimestamp(toDate32(dt_str)) AS from_date32
+FORMAT Vertical;
 ```

 Результат:

 ``` text
-┌─unix_timestamp─┐
-│     1509836867 │
-└────────────────┘
+Row 1:
+──────
+dt_str: 2017-11-05 08:07:47
+from_str: 1509869267
+from_str_tokyo: 1509836867
+from_datetime: 1509869267
+from_datetime64: 1509869267
+from_date: 1509840000
+from_date32: 1509840000
 ```

 :::note
@@ -47,3 +47,12 @@ SELECT * FROM test_table;

 Шаблоны в фигурных скобках `{ }` используются, чтобы сгенерировать список шардов или указать альтернативные адреса на случай отказа. Поддерживаемые типы шаблонов и примеры смотрите в описании функции [remote](remote.md#globs-in-addresses).
 Символ `|` внутри шаблонов используется, чтобы задать адреса, если предыдущие оказались недоступны. Эти адреса перебираются в том же порядке, в котором они указаны в шаблоне. Количество адресов, которые могут быть сгенерированы, ограничено настройкой [glob_expansion_max_elements](../../operations/settings/settings.md#glob_expansion_max_elements).

+## Виртуальные столбцы
+
+- `_path` — Путь до `URL`.
+- `_file` — Имя ресурса `URL`.
+
+**Смотрите также**
+
+- [Виртуальные столбцы](index.md#table_engines-virtual_columns)
@@ -55,7 +55,7 @@ GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION

 同样 `john` 有权执行 `GRANT OPTION`,因此他能给其它账号进行和自己账号权限范围相同的授权。

-可以使用`*` 号代替表或库名进行授权操作。例如, `GRANT SELECT ONdb.* TO john` 操作运行 `john`对 `db`库的所有表执行 `SELECT`查询。同样,你可以忽略库名。在这种情形下,权限将指向当前的数据库。例如, `GRANT SELECT ON* to john` 对当前数据库的所有表指定授权, `GARNT SELECT ON mytable to john`对当前数据库的 `mytable`表进行授权。
+可以使用`*` 号代替表或库名进行授权操作。例如, `GRANT SELECT ONdb.* TO john` 操作运行 `john`对 `db`库的所有表执行 `SELECT`查询。同样,你可以忽略库名。在这种情形下,权限将指向当前的数据库。例如, `GRANT SELECT ON* to john` 对当前数据库的所有表指定授权, `GRANT SELECT ON mytable to john`对当前数据库的 `mytable`表进行授权。

 访问 `systen`数据库总是被允许的(因为这个数据库用来处理sql操作)
 可以一次给多个账号进行多种授权操作。 `GRANT SELECT,INSERT ON *.* TO john,robin` 允许 `john`和`robin` 账号对任意数据库的任意表执行 `INSERT`和 `SELECT`操作。
@@ -41,3 +41,11 @@ CREATE TABLE test_table (column1 String, column2 UInt32) ENGINE=Memory;
 INSERT INTO FUNCTION url('http://127.0.0.1:8123/?query=INSERT+INTO+test_table+FORMAT+CSV', 'CSV', 'column1 String, column2 UInt32') VALUES ('http interface', 42);
 SELECT * FROM test_table;
 ```
+
+## 虚拟列 {#virtual-columns}
+
+- `_path` — `URL`路径。
+- `_file` — 资源名称。
+
+**另请参阅**
+
+- [虚拟列](https://clickhouse.com/docs/en/operations/table_engines/#table_engines-virtual_columns)
@@ -1181,7 +1181,7 @@ void Client::processOptions(const OptionsDescription & options_description,
 void Client::processConfig()
 {
 /// Batch mode is enabled if one of the following is true:
-/// - -e (--query) command line option is present.
+/// - -q (--query) command line option is present.
 /// The value of the option is used as the text of query (or of multiple queries).
 /// If stdin is not a terminal, INSERT data for the first query is read from it.
 /// - stdin is not a terminal. In this case queries are read from it.
@@ -1381,6 +1381,13 @@ void Client::readArguments(
 allow_repeated_settings = true;
 else if (arg == "--allow_merge_tree_settings")
 allow_merge_tree_settings = true;
+else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
+{
+/// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
+++arg_num;
+arg = argv[arg_num];
+addMultiquery(arg, common_arguments);
+}
 else
 common_arguments.emplace_back(arg);
 }
 }
@@ -33,6 +33,7 @@ require (
 github.com/cenkalti/backoff/v4 v4.2.0 // indirect
 github.com/containerd/containerd v1.6.17 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
+github.com/distribution/distribution v2.8.2+incompatible // indirect
 github.com/docker/distribution v2.8.1+incompatible // indirect
 github.com/docker/docker v23.0.0+incompatible // indirect
 github.com/docker/go-units v0.5.0 // indirect
@@ -126,6 +126,8 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8=
+github.com/distribution/distribution v2.8.2+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE=
@@ -69,6 +69,7 @@ if (BUILD_STANDALONE_KEEPER)
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/ProtocolServerAdapter.cpp
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusRequestHandler.cpp
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusMetricsWriter.cpp
+${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/waitServersToFinish.cpp
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPRequestHandlerFactoryMain.cpp
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServer.cpp
 ${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/ReadHeaders.cpp
@@ -11,6 +11,9 @@
 #include <Core/ServerUUID.h>
 #include <Common/logger_useful.h>
 #include <Common/ErrorHandlers.h>
+#include <Common/assertProcessUserMatchesDataOwner.h>
+#include <Common/makeSocketAddress.h>
+#include <Server/waitServersToFinish.h>
 #include <base/scope_guard.h>
 #include <base/safeExit.h>
 #include <Poco/Net/NetException.h>
@@ -75,92 +78,9 @@ namespace ErrorCodes
 extern const int NO_ELEMENTS_IN_CONFIG;
 extern const int SUPPORT_IS_DISABLED;
 extern const int NETWORK_ERROR;
-extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA;
-extern const int FAILED_TO_GETPWUID;
 extern const int LOGICAL_ERROR;
 }

-namespace
-{
-
-size_t waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
-{
-const size_t sleep_max_ms = 1000 * seconds_to_wait;
-const size_t sleep_one_ms = 100;
-size_t sleep_current_ms = 0;
-size_t current_connections = 0;
-for (;;)
-{
-current_connections = 0;
-
-for (auto & server : servers)
-{
-server.stop();
-current_connections += server.currentConnections();
-}
-
-if (!current_connections)
-break;
-
-sleep_current_ms += sleep_one_ms;
-if (sleep_current_ms < sleep_max_ms)
-std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
-else
-break;
-}
-return current_connections;
-}
-
-Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
-{
-Poco::Net::SocketAddress socket_address;
-try
-{
-socket_address = Poco::Net::SocketAddress(host, port);
-}
-catch (const Poco::Net::DNSException & e)
-{
-const auto code = e.code();
-if (code == EAI_FAMILY
-#if defined(EAI_ADDRFAMILY)
-|| code == EAI_ADDRFAMILY
-#endif
-)
-{
-LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
-"If it is an IPv6 address and your host has disabled IPv6, then consider to "
-"specify IPv4 address to listen in <listen_host> element of configuration "
-"file. Example: <listen_host>0.0.0.0</listen_host>",
-host, e.code(), e.message());
-}
-
-throw;
-}
-return socket_address;
-}
-
-std::string getUserName(uid_t user_id)
-{
-/// Try to convert user id into user name.
-auto buffer_size = sysconf(_SC_GETPW_R_SIZE_MAX);
-if (buffer_size <= 0)
-buffer_size = 1024;
-std::string buffer;
-buffer.reserve(buffer_size);
-
-struct passwd passwd_entry;
-struct passwd * result = nullptr;
-const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result);
-
-if (error)
-throwFromErrno("Failed to find user name for " + toString(user_id), ErrorCodes::FAILED_TO_GETPWUID, error);
-else if (result)
-return result->pw_name;
-return toString(user_id);
-}
-
-}
-
 Poco::Net::SocketAddress Keeper::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
 {
 auto address = makeSocketAddress(host, port, &logger());
@@ -364,24 +284,7 @@ try
 std::filesystem::create_directories(path);

 /// Check that the process user id matches the owner of the data.
-const auto effective_user_id = geteuid();
-struct stat statbuf;
-if (stat(path.c_str(), &statbuf) == 0 && effective_user_id != statbuf.st_uid)
-{
-const auto effective_user = getUserName(effective_user_id);
-const auto data_owner = getUserName(statbuf.st_uid);
-std::string message = "Effective user of the process (" + effective_user +
-") does not match the owner of the data (" + data_owner + ").";
-if (effective_user_id == 0)
-{
-message += " Run under 'sudo -u " + data_owner + "'.";
-throw Exception::createDeprecated(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
-}
-else
-{
-LOG_WARNING(log, fmt::runtime(message));
-}
-}
+assertProcessUserMatchesDataOwner(path, [&](const std::string & message){ LOG_WARNING(log, fmt::runtime(message)); });

 DB::ServerUUID::load(path + "/uuid", log);
@@ -818,8 +818,16 @@ void LocalServer::readArguments(int argc, char ** argv, Arguments & common_argum
 {
 for (int arg_num = 1; arg_num < argc; ++arg_num)
 {
-const char * arg = argv[arg_num];
-common_arguments.emplace_back(arg);
+std::string_view arg = argv[arg_num];
+if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
+{
+/// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
+++arg_num;
+arg = argv[arg_num];
+addMultiquery(arg, common_arguments);
+}
+else
+common_arguments.emplace_back(arg);
 }
 }

@@ -4,10 +4,10 @@ if (NOT(
 AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR
 )
 )
-set (COMPRESSOR "${CMAKE_BINARY_DIR}/native/utils/self-extracting-executable/pre_compressor")
-set (DECOMPRESSOR "--decompressor=${CMAKE_BINARY_DIR}/utils/self-extracting-executable/decompressor")
+set (COMPRESSOR "${PROJECT_BINARY_DIR}/native/utils/self-extracting-executable/pre_compressor")
+set (DECOMPRESSOR "--decompressor=${PROJECT_BINARY_DIR}/utils/self-extracting-executable/decompressor")
 else ()
-set (COMPRESSOR "${CMAKE_BINARY_DIR}/utils/self-extracting-executable/compressor")
+set (COMPRESSOR "${PROJECT_BINARY_DIR}/utils/self-extracting-executable/compressor")
 endif ()

 add_custom_target (self-extracting ALL
@@ -39,6 +39,9 @@
 #include <Common/remapExecutable.h>
 #include <Common/TLDListsHolder.h>
 #include <Common/Config/AbstractConfigurationComparison.h>
+#include <Common/assertProcessUserMatchesDataOwner.h>
+#include <Common/makeSocketAddress.h>
+#include <Server/waitServersToFinish.h>
 #include <Core/ServerUUID.h>
 #include <IO/ReadHelpers.h>
 #include <IO/ReadBufferFromFile.h>
@@ -200,40 +203,6 @@ int mainEntryClickHouseServer(int argc, char ** argv)
 }
 }

-namespace
-{
-
-size_t waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
-{
-const size_t sleep_max_ms = 1000 * seconds_to_wait;
-const size_t sleep_one_ms = 100;
-size_t sleep_current_ms = 0;
-size_t current_connections = 0;
-for (;;)
-{
-current_connections = 0;
-
-for (auto & server : servers)
-{
-server.stop();
-current_connections += server.currentConnections();
-}
-
-if (!current_connections)
-break;
-
-sleep_current_ms += sleep_one_ms;
-if (sleep_current_ms < sleep_max_ms)
-std::this_thread::sleep_for(std::chrono::milliseconds(sleep_one_ms));
-else
-break;
-}
-return current_connections;
-}
-
-}
-
 namespace DB
 {

@@ -244,8 +213,6 @@ namespace ErrorCodes
 extern const int ARGUMENT_OUT_OF_BOUND;
 extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
 extern const int INVALID_CONFIG_PARAMETER;
-extern const int FAILED_TO_GETPWUID;
-extern const int MISMATCHING_USERS_FOR_PROCESS_AND_DATA;
 extern const int NETWORK_ERROR;
 extern const int CORRUPTED_DATA;
 }
@@ -261,54 +228,6 @@ static std::string getCanonicalPath(std::string && path)
 return std::move(path);
 }

-static std::string getUserName(uid_t user_id)
-{
-/// Try to convert user id into user name.
-auto buffer_size = sysconf(_SC_GETPW_R_SIZE_MAX);
-if (buffer_size <= 0)
-buffer_size = 1024;
-std::string buffer;
-buffer.reserve(buffer_size);
-
-struct passwd passwd_entry;
-struct passwd * result = nullptr;
-const auto error = getpwuid_r(user_id, &passwd_entry, buffer.data(), buffer_size, &result);
-
-if (error)
-throwFromErrno("Failed to find user name for " + toString(user_id), ErrorCodes::FAILED_TO_GETPWUID, error);
-else if (result)
-return result->pw_name;
-return toString(user_id);
-}
-
-Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
-{
-Poco::Net::SocketAddress socket_address;
-try
-{
-socket_address = Poco::Net::SocketAddress(host, port);
-}
-catch (const Poco::Net::DNSException & e)
-{
-const auto code = e.code();
-if (code == EAI_FAMILY
-#if defined(EAI_ADDRFAMILY)
-|| code == EAI_ADDRFAMILY
-#endif
-)
-{
-LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
-"If it is an IPv6 address and your host has disabled IPv6, then consider to "
-"specify IPv4 address to listen in <listen_host> element of configuration "
-"file. Example: <listen_host>0.0.0.0</listen_host>",
-host, e.code(), e.message());
-}
-
-throw;
-}
-return socket_address;
-}
-
 Poco::Net::SocketAddress Server::socketBindListen(
 const Poco::Util::AbstractConfiguration & config,
 Poco::Net::ServerSocket & socket,
@@ -959,24 +878,7 @@ try
 std::string default_database = server_settings.default_database.toString();

 /// Check that the process user id matches the owner of the data.
-const auto effective_user_id = geteuid();
-struct stat statbuf;
-if (stat(path_str.c_str(), &statbuf) == 0 && effective_user_id != statbuf.st_uid)
-{
-const auto effective_user = getUserName(effective_user_id);
-const auto data_owner = getUserName(statbuf.st_uid);
-std::string message = "Effective user of the process (" + effective_user +
-") does not match the owner of the data (" + data_owner + ").";
-if (effective_user_id == 0)
-{
-message += " Run under 'sudo -u " + data_owner + "'.";
-throw Exception::createDeprecated(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
-}
-else
-{
-global_context->addWarningMessage(message);
-}
-}
+assertProcessUserMatchesDataOwner(path_str, [&](const std::string & message){ global_context->addWarningMessage(message); });

 global_context->setPath(path_str);

@@ -719,8 +719,12 @@
 <!-- Default profile of settings. -->
 <default_profile>default</default_profile>

-<!-- Comma-separated list of prefixes for user-defined settings. -->
-<custom_settings_prefixes></custom_settings_prefixes>
+<!-- Comma-separated list of prefixes for user-defined settings.
+The server will allow to set these settings, and retrieve them with the getSetting function.
+They are also logged in the query_log, similarly to other settings, but have no special effect.
+The "SQL_" prefix is introduced for compatibility with MySQL - these settings are being set be Tableau.
+-->
+<custom_settings_prefixes>SQL_</custom_settings_prefixes>

 <!-- System profile of settings. This settings are used by internal processes (Distributed DDL worker and so on). -->
 <!-- <system_profile>default</system_profile> -->
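The new comment points at the `getSetting` function; a minimal sketch of how a custom setting under the `SQL_` prefix could be used once this config is in place — the setting name itself is invented:

``` sql
-- Allowed because custom_settings_prefixes now includes 'SQL_'.
SET SQL_my_custom_setting = 'some value';
SELECT getSetting('SQL_my_custom_setting');
```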
@@ -121,7 +121,7 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample(

 void registerAggregateFunctionGroupArray(AggregateFunctionFactory & factory)
 {
-AggregateFunctionProperties properties = { .returns_default_when_only_null = true, .is_order_dependent = true };
+AggregateFunctionProperties properties = { .returns_default_when_only_null = false, .is_order_dependent = true };

 factory.registerFunction("groupArray", { createAggregateFunctionGroupArray<false>, properties });
 factory.registerFunction("groupArraySample", { createAggregateFunctionGroupArraySample, properties });
@@ -72,7 +72,7 @@ public:
 {
 /// Currently the only functions that returns not-NULL on all NULL arguments are count and uniq, and they returns UInt64.
 if (properties.returns_default_when_only_null)
-return std::make_shared<AggregateFunctionNothing>(arguments, params, nested_function->getResultType());
+return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeUInt64>());
 else
 return std::make_shared<AggregateFunctionNothing>(arguments, params, std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>()));
 }
@@ -38,6 +38,9 @@ public:
 if (!query->hasGroupBy())
 return;

+if (query->isGroupByWithCube() || query->isGroupByWithRollup())
+return;
+
 auto & group_by = query->getGroupBy().getNodes();
 if (query->isGroupByWithGroupingSets())
 {
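The early return added above makes the pass skip GROUP BY variants it does not handle; roughly these query shapes are the ones now left untouched (table and column names are invented for illustration):

``` sql
SELECT region, product, sum(amount)
FROM sales
GROUP BY region, product WITH ROLLUP;   -- likewise WITH CUBE

-- GROUPING SETS keeps its dedicated branch in the pass:
SELECT region, product, sum(amount)
FROM sales
GROUP BY GROUPING SETS ((region), (region, product));
```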
@@ -6355,7 +6355,7 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
 auto table_function_ast = table_function_node_typed.toAST();
 table_function_ptr->parseArguments(table_function_ast, scope_context);

-auto table_function_storage = table_function_ptr->execute(table_function_ast, scope_context, table_function_ptr->getName());
+auto table_function_storage = scope_context->getQueryContext()->executeTableFunction(table_function_ast, table_function_ptr);
 table_function_node_typed.resolve(std::move(table_function_ptr), std::move(table_function_storage), scope_context);
 }

@@ -115,6 +115,7 @@ namespace
 writeBinary(info.checksum, out);
 writeBinary(info.base_size, out);
 writeBinary(info.base_checksum, out);
+writeBinary(info.encrypted_by_disk, out);
 /// We don't store `info.data_file_name` and `info.data_file_index` because they're determined automalically
 /// after reading file infos for all the hosts (see the class BackupCoordinationFileInfos).
 }
@@ -136,6 +137,7 @@ namespace
 readBinary(info.checksum, in);
 readBinary(info.base_size, in);
 readBinary(info.base_checksum, in);
+readBinary(info.encrypted_by_disk, in);
 }
 return res;
 }
@@ -254,7 +256,10 @@ void BackupCoordinationRemote::removeAllNodes()

 void BackupCoordinationRemote::setStage(const String & new_stage, const String & message)
 {
-stage_sync->set(current_host, new_stage, message);
+if (is_internal)
+stage_sync->set(current_host, new_stage, message);
+else
+stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true);
 }

 void BackupCoordinationRemote::setError(const Exception & exception)
@@ -777,8 +782,8 @@ bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &)
 String status;
 if (zk->tryGet(root_zookeeper_path + "/" + existing_backup_path + "/stage", status))
 {
-/// If status is not COMPLETED it could be because the backup failed, check if 'error' exists
-if (status != Stage::COMPLETED && !zk->exists(root_zookeeper_path + "/" + existing_backup_path + "/error"))
+/// Check if some other backup is in progress
+if (status == Stage::SCHEDULED_TO_START)
 {
 LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid));
 result = true;
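The coordination code above runs underneath SQL-level backups; a representative statement that would exercise the concurrent-backup check — the table name and backup destination are assumptions:

``` sql
BACKUP TABLE db.table TO Disk('backups', 'backup_1.zip') ASYNC;
```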
@@ -43,6 +43,10 @@ namespace BackupCoordinationStage

 /// Coordination stage meaning that a host finished its work.
 constexpr const char * COMPLETED = "completed";

+/// Coordination stage meaning that backup/restore has failed due to an error
+/// Check '/error' for the error message
+constexpr const char * ERROR = "error";
 }

 }
@@ -8,11 +8,13 @@
 #include <IO/ReadHelpers.h>
 #include <IO/WriteBufferFromString.h>
 #include <IO/WriteHelpers.h>
+#include <Backups/BackupCoordinationStage.h>

 namespace DB
 {

+namespace Stage = BackupCoordinationStage;
+
 namespace ErrorCodes
 {
 extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
@@ -42,7 +44,7 @@ void BackupCoordinationStageSync::createRootNodes()
 });
 }

-void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message)
+void BackupCoordinationStageSync::set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts)
 {
 auto holder = with_retries.createRetriesControlHolder("set");
 holder.retries_ctl.retryLoop(
@@ -50,14 +52,23 @@ void BackupCoordinationStageSync::set(const String & current_host, const String
 {
 with_retries.renewZooKeeper(zookeeper);

-/// Make an ephemeral node so the initiator can track if the current host is still working.
-String alive_node_path = zookeeper_path + "/alive|" + current_host;
-auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
-if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
-throw zkutil::KeeperException(code, alive_node_path);
+if (all_hosts)
+{
+auto code = zookeeper->trySet(zookeeper_path, new_stage);
+if (code != Coordination::Error::ZOK)
+throw zkutil::KeeperException(code, zookeeper_path);
+}
+else
+{
+/// Make an ephemeral node so the initiator can track if the current host is still working.
+String alive_node_path = zookeeper_path + "/alive|" + current_host;
+auto code = zookeeper->tryCreate(alive_node_path, "", zkutil::CreateMode::Ephemeral);
+if (code != Coordination::Error::ZOK && code != Coordination::Error::ZNODEEXISTS)
+throw zkutil::KeeperException(code, alive_node_path);

 zookeeper->createIfNotExists(zookeeper_path + "/started|" + current_host, "");
 zookeeper->createIfNotExists(zookeeper_path + "/current|" + current_host + "|" + new_stage, message);
+}
 });
 }

|
|||||||
writeStringBinary(current_host, buf);
|
writeStringBinary(current_host, buf);
|
||||||
writeException(exception, buf, true);
|
writeException(exception, buf, true);
|
||||||
zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
|
zookeeper->createIfNotExists(zookeeper_path + "/error", buf.str());
|
||||||
|
|
||||||
|
auto code = zookeeper->trySet(zookeeper_path, Stage::ERROR);
|
||||||
|
if (code != Coordination::Error::ZOK)
|
||||||
|
throw zkutil::KeeperException(code, zookeeper_path);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -15,7 +15,7 @@ public:
 Poco::Logger * log_);

 /// Sets the stage of the current host and signal other hosts if there were other hosts waiting for that.
-void set(const String & current_host, const String & new_stage, const String & message);
+void set(const String & current_host, const String & new_stage, const String & message, const bool & all_hosts = false);
 void setError(const String & current_host, const Exception & exception);

 /// Sets the stage of the current host and waits until all hosts come to the same stage.
@@ -1,26 +1,45 @@
 #include <Backups/BackupEntryFromAppendOnlyFile.h>
+#include <Disks/IDisk.h>
 #include <IO/LimitSeekableReadBuffer.h>


 namespace DB
 {

+namespace
+{
+/// For append-only files we must calculate its size on the construction of a backup entry.
+UInt64 calculateSize(const DiskPtr & disk, const String & file_path, bool copy_encrypted, std::optional<UInt64> unencrypted_file_size)
+{
+if (!unencrypted_file_size)
+return copy_encrypted ? disk->getEncryptedFileSize(file_path) : disk->getFileSize(file_path);
+else if (copy_encrypted)
+return disk->getEncryptedFileSize(*unencrypted_file_size);
+else
+return *unencrypted_file_size;
+}
+}
+
 BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile(
-const DiskPtr & disk_,
-const String & file_path_,
-const ReadSettings & settings_,
-const std::optional<UInt64> & file_size_,
-const std::optional<UInt128> & checksum_,
-const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_)
-: BackupEntryFromImmutableFile(disk_, file_path_, settings_, file_size_, checksum_, temporary_file_)
-, limit(BackupEntryFromImmutableFile::getSize())
+const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_, const std::optional<UInt64> & file_size_)
+: disk(disk_)
+, file_path(file_path_)
+, data_source_description(disk->getDataSourceDescription())
+, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
+, size(calculateSize(disk_, file_path_, copy_encrypted, file_size_))
 {
 }

-std::unique_ptr<SeekableReadBuffer> BackupEntryFromAppendOnlyFile::getReadBuffer() const
+BackupEntryFromAppendOnlyFile::~BackupEntryFromAppendOnlyFile() = default;
+
+std::unique_ptr<SeekableReadBuffer> BackupEntryFromAppendOnlyFile::getReadBuffer(const ReadSettings & read_settings) const
 {
-auto buf = BackupEntryFromImmutableFile::getReadBuffer();
-return std::make_unique<LimitSeekableReadBuffer>(std::move(buf), 0, limit);
+std::unique_ptr<SeekableReadBuffer> buf;
+if (copy_encrypted)
+buf = disk->readEncryptedFile(file_path, read_settings.adjustBufferSize(size));
+else
+buf = disk->readFile(file_path, read_settings.adjustBufferSize(size));
+return std::make_unique<LimitSeekableReadBuffer>(std::move(buf), 0, size);
 }

 }
@@ -1,6 +1,6 @@
 #pragma once

-#include <Backups/BackupEntryFromImmutableFile.h>
+#include <Backups/BackupEntryWithChecksumCalculation.h>


 namespace DB
@@ -8,24 +8,34 @@ namespace DB

 /// Represents a file prepared to be included in a backup, assuming that until this backup entry is destroyed
 /// the file can be appended with new data, but the bytes which are already in the file won't be changed.
-class BackupEntryFromAppendOnlyFile : public BackupEntryFromImmutableFile
+class BackupEntryFromAppendOnlyFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
 {
 public:
-/// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
+/// The constructor is allowed to not set `file_size_`, in that case it will be calculated from the data.
 BackupEntryFromAppendOnlyFile(
 const DiskPtr & disk_,
 const String & file_path_,
-const ReadSettings & settings_,
-const std::optional<UInt64> & file_size_ = {},
-const std::optional<UInt128> & checksum_ = {},
-const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_ = {});
+bool copy_encrypted_ = false,
+const std::optional<UInt64> & file_size_ = {});

-UInt64 getSize() const override { return limit; }
-std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
+~BackupEntryFromAppendOnlyFile() override;
+
+std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override;
+UInt64 getSize() const override { return size; }
+
+DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
+bool isEncryptedByDisk() const override { return copy_encrypted; }
+
+bool isFromFile() const override { return true; }
+DiskPtr getDisk() const override { return disk; }
+String getFilePath() const override { return file_path; }

 private:
-const UInt64 limit;
+const DiskPtr disk;
+const String file_path;
+const DataSourceDescription data_source_description;
+const bool copy_encrypted;
+const UInt64 size;
 };

 }
@@ -1,53 +1,84 @@
 #include <Backups/BackupEntryFromImmutableFile.h>
 #include <Disks/IDisk.h>
-#include <Disks/IO/createReadBufferFromFileBase.h>
-#include <Poco/File.h>
-#include <Common/filesystemHelpers.h>


 namespace DB
 {

+namespace
+{
+/// We mix the checksum calculated for non-encrypted data with IV generated to encrypt the file
+/// to generate kind of a checksum for encrypted data. Of course it differs from the CityHash properly calculated for encrypted data.
+UInt128 combineChecksums(UInt128 checksum1, UInt128 checksum2)
+{
+chassert(std::size(checksum2.items) == 2);
+return CityHash_v1_0_2::CityHash128WithSeed(reinterpret_cast<const char *>(&checksum1), sizeof(checksum1), {checksum2.items[0], checksum2.items[1]});
+}
+}
+
 BackupEntryFromImmutableFile::BackupEntryFromImmutableFile(
 const DiskPtr & disk_,
 const String & file_path_,
-const ReadSettings & settings_,
+bool copy_encrypted_,
 const std::optional<UInt64> & file_size_,
-const std::optional<UInt128> & checksum_,
-const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_)
+const std::optional<UInt128> & checksum_)
 : disk(disk_)
 , file_path(file_path_)
-, settings(settings_)
+, data_source_description(disk->getDataSourceDescription())
+, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
 , file_size(file_size_)
 , checksum(checksum_)
-, temporary_file_on_disk(temporary_file_)
 {
 }

 BackupEntryFromImmutableFile::~BackupEntryFromImmutableFile() = default;

+std::unique_ptr<SeekableReadBuffer> BackupEntryFromImmutableFile::getReadBuffer(const ReadSettings & read_settings) const
+{
+if (copy_encrypted)
+return disk->readEncryptedFile(file_path, read_settings);
+else
+return disk->readFile(file_path, read_settings);
+}
+
 UInt64 BackupEntryFromImmutableFile::getSize() const
 {
-std::lock_guard lock{get_file_size_mutex};
-if (!file_size)
-file_size = disk->getFileSize(file_path);
+std::lock_guard lock{size_and_checksum_mutex};
+if (!file_size_adjusted)
+{
+if (!file_size)
+file_size = copy_encrypted ? disk->getEncryptedFileSize(file_path) : disk->getFileSize(file_path);
+else if (copy_encrypted)
+file_size = disk->getEncryptedFileSize(*file_size);
+file_size_adjusted = true;
+}
 return *file_size;
 }

-std::unique_ptr<SeekableReadBuffer> BackupEntryFromImmutableFile::getReadBuffer() const
+UInt128 BackupEntryFromImmutableFile::getChecksum() const
 {
-return disk->readFile(file_path, settings);
+std::lock_guard lock{size_and_checksum_mutex};
+if (!checksum_adjusted)
+{
+if (!checksum)
+checksum = BackupEntryWithChecksumCalculation<IBackupEntry>::getChecksum();
+else if (copy_encrypted)
+checksum = combineChecksums(*checksum, disk->getEncryptedFileIV(file_path));
+checksum_adjusted = true;
+}
+return *checksum;
 }

-DataSourceDescription BackupEntryFromImmutableFile::getDataSourceDescription() const
+std::optional<UInt128> BackupEntryFromImmutableFile::getPartialChecksum(size_t prefix_length) const
 {
-return disk->getDataSourceDescription();
-}
-
-String BackupEntryFromImmutableFile::getFilePath() const
-{
-return file_path;
+if (prefix_length == 0)
+return 0;
+
+if (prefix_length >= getSize())
+return getChecksum();
+
+/// For immutable files we don't use partial checksums.
+return std::nullopt;
 }

 }
@@ -1,49 +1,53 @@
 #pragma once

-#include <Backups/IBackupEntry.h>
-#include <IO/ReadSettings.h>
+#include <Backups/BackupEntryWithChecksumCalculation.h>
 #include <base/defines.h>
 #include <mutex>


 namespace DB
 {
-class TemporaryFileOnDisk;
 class IDisk;
 using DiskPtr = std::shared_ptr<IDisk>;

 /// Represents a file prepared to be included in a backup, assuming that until this backup entry is destroyed the file won't be changed.
-class BackupEntryFromImmutableFile : public IBackupEntry
+class BackupEntryFromImmutableFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
 {
 public:

 /// The constructor is allowed to not set `file_size_` or `checksum_`, in that case it will be calculated from the data.
 BackupEntryFromImmutableFile(
 const DiskPtr & disk_,
 const String & file_path_,
-const ReadSettings & settings_,
+bool copy_encrypted_ = false,
 const std::optional<UInt64> & file_size_ = {},
-const std::optional<UInt128> & checksum_ = {},
-const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_ = {});
+const std::optional<UInt128> & checksum_ = {});

 ~BackupEntryFromImmutableFile() override;

+std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override;
+
 UInt64 getSize() const override;
-std::optional<UInt128> getChecksum() const override { return checksum; }
-std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
+UInt128 getChecksum() const override;
+std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override;

-String getFilePath() const override;
-DataSourceDescription getDataSourceDescription() const override;
+DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
+bool isEncryptedByDisk() const override { return copy_encrypted; }

-DiskPtr tryGetDiskIfExists() const override { return disk; }
+bool isFromFile() const override { return true; }
+bool isFromImmutableFile() const override { return true; }
+DiskPtr getDisk() const override { return disk; }
+String getFilePath() const override { return file_path; }

 private:
 const DiskPtr disk;
 const String file_path;
-ReadSettings settings;
-mutable std::optional<UInt64> file_size TSA_GUARDED_BY(get_file_size_mutex);
-mutable std::mutex get_file_size_mutex;
-const std::optional<UInt128> checksum;
-const std::shared_ptr<TemporaryFileOnDisk> temporary_file_on_disk;
+const DataSourceDescription data_source_description;
+const bool copy_encrypted;
+mutable std::optional<UInt64> file_size;
+mutable std::optional<UInt64> checksum;
+mutable bool file_size_adjusted = false;
+mutable bool checksum_adjusted = false;
+mutable std::mutex size_and_checksum_mutex;
 };

 }
@ -5,17 +5,16 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
BackupEntryFromMemory::BackupEntryFromMemory(const void * data_, size_t size_, const std::optional<UInt128> & checksum_)
|
BackupEntryFromMemory::BackupEntryFromMemory(const void * data_, size_t size_)
|
||||||
: BackupEntryFromMemory(String{reinterpret_cast<const char *>(data_), size_}, checksum_)
|
: BackupEntryFromMemory(String{reinterpret_cast<const char *>(data_), size_})
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
BackupEntryFromMemory::BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_)
|
BackupEntryFromMemory::BackupEntryFromMemory(String data_) : data(std::move(data_))
|
||||||
: data(std::move(data_)), checksum(checksum_)
|
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<SeekableReadBuffer> BackupEntryFromMemory::getReadBuffer() const
|
std::unique_ptr<SeekableReadBuffer> BackupEntryFromMemory::getReadBuffer(const ReadSettings &) const
|
||||||
{
|
{
|
||||||
return std::make_unique<ReadBufferFromString>(data);
|
return std::make_unique<ReadBufferFromString>(data);
|
||||||
}
|
}
|
||||||
|
@ -1,39 +1,26 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Backups/IBackupEntry.h>
|
#include <Backups/BackupEntryWithChecksumCalculation.h>
|
||||||
#include <IO/ReadBufferFromString.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
/// Represents small preloaded data to be included in a backup.
|
/// Represents small preloaded data to be included in a backup.
|
||||||
class BackupEntryFromMemory : public IBackupEntry
|
class BackupEntryFromMemory : public BackupEntryWithChecksumCalculation<IBackupEntry>
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
|
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
|
||||||
BackupEntryFromMemory(const void * data_, size_t size_, const std::optional<UInt128> & checksum_ = {});
|
BackupEntryFromMemory(const void * data_, size_t size_);
|
||||||
explicit BackupEntryFromMemory(String data_, const std::optional<UInt128> & checksum_ = {});
|
explicit BackupEntryFromMemory(String data_);
|
||||||
|
|
||||||
|
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings &) const override;
|
||||||
UInt64 getSize() const override { return data.size(); }
|
UInt64 getSize() const override { return data.size(); }
|
||||||
std::optional<UInt128> getChecksum() const override { return checksum; }
|
|
||||||
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override;
|
|
||||||
|
|
||||||
String getFilePath() const override
|
DataSourceDescription getDataSourceDescription() const override { return DataSourceDescription{DataSourceType::RAM, "", false, false}; }
|
||||||
{
|
|
||||||
return "";
|
|
||||||
}
|
|
||||||
|
|
||||||
DataSourceDescription getDataSourceDescription() const override
|
|
||||||
{
|
|
||||||
return DataSourceDescription{DataSourceType::RAM, "", false, false};
|
|
||||||
}
|
|
||||||
|
|
||||||
DiskPtr tryGetDiskIfExists() const override { return nullptr; }
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const String data;
|
const String data;
|
||||||
const std::optional<UInt128> checksum;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,9 @@
|
|||||||
#include <Backups/BackupEntryFromSmallFile.h>
|
#include <Backups/BackupEntryFromSmallFile.h>
|
||||||
|
#include <Common/filesystemHelpers.h>
|
||||||
|
#include <Disks/DiskLocal.h>
|
||||||
#include <Disks/IDisk.h>
|
#include <Disks/IDisk.h>
|
||||||
#include <Disks/IO/createReadBufferFromFileBase.h>
|
#include <Disks/IO/createReadBufferFromFileBase.h>
|
||||||
|
#include <IO/ReadBufferFromString.h>
|
||||||
#include <IO/ReadHelpers.h>
|
#include <IO/ReadHelpers.h>
|
||||||
|
|
||||||
|
|
||||||
@ -16,9 +19,9 @@ namespace
|
|||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
String readFile(const DiskPtr & disk, const String & file_path)
|
String readFile(const DiskPtr & disk, const String & file_path, bool copy_encrypted)
|
||||||
{
|
{
|
||||||
auto buf = disk->readFile(file_path);
|
auto buf = copy_encrypted ? disk->readEncryptedFile(file_path, {}) : disk->readFile(file_path);
|
||||||
String s;
|
String s;
|
||||||
readStringUntilEOF(s, *buf);
|
readStringUntilEOF(s, *buf);
|
||||||
return s;
|
return s;
|
||||||
@ -26,15 +29,25 @@ namespace
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const String & file_path_, const std::optional<UInt128> & checksum_)
|
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const String & file_path_)
|
||||||
: BackupEntryFromMemory(readFile(file_path_), checksum_), file_path(file_path_)
|
: file_path(file_path_)
|
||||||
|
, data_source_description(DiskLocal::getLocalDataSourceDescription(file_path_))
|
||||||
|
, data(readFile(file_path_))
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
BackupEntryFromSmallFile::BackupEntryFromSmallFile(
|
BackupEntryFromSmallFile::BackupEntryFromSmallFile(const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_)
|
||||||
const DiskPtr & disk_, const String & file_path_, const std::optional<UInt128> & checksum_)
|
: disk(disk_)
|
||||||
: BackupEntryFromMemory(readFile(disk_, file_path_), checksum_), disk(disk_), file_path(file_path_)
|
, file_path(file_path_)
|
||||||
|
, data_source_description(disk_->getDataSourceDescription())
|
||||||
|
, copy_encrypted(copy_encrypted_ && data_source_description.is_encrypted)
|
||||||
|
, data(readFile(disk_, file_path, copy_encrypted))
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<SeekableReadBuffer> BackupEntryFromSmallFile::getReadBuffer(const ReadSettings &) const
|
||||||
|
{
|
||||||
|
return std::make_unique<ReadBufferFromString>(data);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Backups/BackupEntryFromMemory.h>
|
#include <Backups/BackupEntryWithChecksumCalculation.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -10,25 +10,28 @@ using DiskPtr = std::shared_ptr<IDisk>;
|
|||||||
|
|
||||||
/// Represents a file prepared to be included in a backup,
|
/// Represents a file prepared to be included in a backup,
|
||||||
/// assuming that the file is small and can be easily loaded into memory.
|
/// assuming that the file is small and can be easily loaded into memory.
|
||||||
class BackupEntryFromSmallFile : public BackupEntryFromMemory
|
class BackupEntryFromSmallFile : public BackupEntryWithChecksumCalculation<IBackupEntry>
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
/// The constructor is allowed to not set `checksum_`, in that case it will be calculated from the data.
|
explicit BackupEntryFromSmallFile(const String & file_path_);
|
||||||
explicit BackupEntryFromSmallFile(
|
BackupEntryFromSmallFile(const DiskPtr & disk_, const String & file_path_, bool copy_encrypted_ = false);
|
||||||
const String & file_path_,
|
|
||||||
const std::optional<UInt128> & checksum_ = {});
|
|
||||||
|
|
||||||
BackupEntryFromSmallFile(
|
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings &) const override;
|
||||||
const DiskPtr & disk_,
|
UInt64 getSize() const override { return data.size(); }
|
||||||
const String & file_path_,
|
|
||||||
const std::optional<UInt128> & checksum_ = {});
|
|
||||||
|
|
||||||
|
DataSourceDescription getDataSourceDescription() const override { return data_source_description; }
|
||||||
|
bool isEncryptedByDisk() const override { return copy_encrypted; }
|
||||||
|
|
||||||
|
bool isFromFile() const override { return true; }
|
||||||
|
DiskPtr getDisk() const override { return disk; }
|
||||||
String getFilePath() const override { return file_path; }
|
String getFilePath() const override { return file_path; }
|
||||||
|
|
||||||
DiskPtr tryGetDiskIfExists() const override { return disk; }
|
|
||||||
private:
|
private:
|
||||||
const DiskPtr disk;
|
const DiskPtr disk;
|
||||||
const String file_path;
|
const String file_path;
|
||||||
|
const DataSourceDescription data_source_description;
|
||||||
|
const bool copy_encrypted = false;
|
||||||
|
const String data;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
54
src/Backups/BackupEntryWithChecksumCalculation.cpp
Normal file
54
src/Backups/BackupEntryWithChecksumCalculation.cpp
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
#include <Backups/BackupEntryWithChecksumCalculation.h>
|
||||||
|
#include <IO/HashingReadBuffer.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
template <typename Base>
|
||||||
|
UInt128 BackupEntryWithChecksumCalculation<Base>::getChecksum() const
|
||||||
|
{
|
||||||
|
std::lock_guard lock{checksum_calculation_mutex};
|
||||||
|
if (!calculated_checksum)
|
||||||
|
{
|
||||||
|
auto read_buffer = this->getReadBuffer(ReadSettings{}.adjustBufferSize(this->getSize()));
|
||||||
|
HashingReadBuffer hashing_read_buffer(*read_buffer);
|
||||||
|
hashing_read_buffer.ignoreAll();
|
||||||
|
calculated_checksum = hashing_read_buffer.getHash();
|
||||||
|
}
|
||||||
|
return *calculated_checksum;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename Base>
|
||||||
|
std::optional<UInt128> BackupEntryWithChecksumCalculation<Base>::getPartialChecksum(size_t prefix_length) const
|
||||||
|
{
|
||||||
|
if (prefix_length == 0)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
size_t size = this->getSize();
|
||||||
|
if (prefix_length >= size)
|
||||||
|
return this->getChecksum();
|
||||||
|
|
||||||
|
std::lock_guard lock{checksum_calculation_mutex};
|
||||||
|
|
||||||
|
ReadSettings read_settings;
|
||||||
|
if (calculated_checksum)
|
||||||
|
read_settings.adjustBufferSize(calculated_checksum ? prefix_length : size);
|
||||||
|
|
||||||
|
auto read_buffer = this->getReadBuffer(read_settings);
|
||||||
|
HashingReadBuffer hashing_read_buffer(*read_buffer);
|
||||||
|
hashing_read_buffer.ignore(prefix_length);
|
||||||
|
auto partial_checksum = hashing_read_buffer.getHash();
|
||||||
|
|
||||||
|
if (!calculated_checksum)
|
||||||
|
{
|
||||||
|
hashing_read_buffer.ignoreAll();
|
||||||
|
calculated_checksum = hashing_read_buffer.getHash();
|
||||||
|
}
|
||||||
|
|
||||||
|
return partial_checksum;
|
||||||
|
}
|
||||||
|
|
||||||
|
template class BackupEntryWithChecksumCalculation<IBackupEntry>;
|
||||||
|
|
||||||
|
}
|
22
src/Backups/BackupEntryWithChecksumCalculation.h
Normal file
22
src/Backups/BackupEntryWithChecksumCalculation.h
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Backups/IBackupEntry.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
/// Calculates the checksum and the partial checksum for a backup entry based on ReadBuffer returned by getReadBuffer().
|
||||||
|
template <typename Base>
|
||||||
|
class BackupEntryWithChecksumCalculation : public Base
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
UInt128 getChecksum() const override;
|
||||||
|
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override;
|
||||||
|
|
||||||
|
private:
|
||||||
|
mutable std::optional<UInt128> calculated_checksum;
|
||||||
|
mutable std::mutex checksum_calculation_mutex;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
@ -15,23 +15,33 @@ public:
|
|||||||
BackupEntryWrappedWith(BackupEntryPtr entry_, T && custom_value_) : entry(entry_), custom_value(std::move(custom_value_)) { }
|
BackupEntryWrappedWith(BackupEntryPtr entry_, T && custom_value_) : entry(entry_), custom_value(std::move(custom_value_)) { }
|
||||||
~BackupEntryWrappedWith() override = default;
|
~BackupEntryWrappedWith() override = default;
|
||||||
|
|
||||||
|
std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override { return entry->getReadBuffer(read_settings); }
|
||||||
UInt64 getSize() const override { return entry->getSize(); }
|
UInt64 getSize() const override { return entry->getSize(); }
|
||||||
std::optional<UInt128> getChecksum() const override { return entry->getChecksum(); }
|
UInt128 getChecksum() const override { return entry->getChecksum(); }
|
||||||
std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override { return entry->getReadBuffer(); }
|
std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override { return entry->getPartialChecksum(prefix_length); }
|
||||||
String getFilePath() const override { return entry->getFilePath(); }
|
|
||||||
DiskPtr tryGetDiskIfExists() const override { return entry->tryGetDiskIfExists(); }
|
|
||||||
DataSourceDescription getDataSourceDescription() const override { return entry->getDataSourceDescription(); }
|
DataSourceDescription getDataSourceDescription() const override { return entry->getDataSourceDescription(); }
|
||||||
|
bool isEncryptedByDisk() const override { return entry->isEncryptedByDisk(); }
|
||||||
|
bool isFromFile() const override { return entry->isFromFile(); }
|
||||||
|
bool isFromImmutableFile() const override { return entry->isFromImmutableFile(); }
|
||||||
|
String getFilePath() const override { return entry->getFilePath(); }
|
||||||
|
DiskPtr getDisk() const override { return entry->getDisk(); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
BackupEntryPtr entry;
|
BackupEntryPtr entry;
|
||||||
T custom_value;
|
T custom_value;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
BackupEntryPtr wrapBackupEntryWith(BackupEntryPtr && backup_entry, const T & custom_value)
|
||||||
|
{
|
||||||
|
return std::make_shared<BackupEntryWrappedWith<T>>(std::move(backup_entry), custom_value);
|
||||||
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void wrapBackupEntriesWith(std::vector<std::pair<String, BackupEntryPtr>> & backup_entries, const T & custom_value)
|
void wrapBackupEntriesWith(std::vector<std::pair<String, BackupEntryPtr>> & backup_entries, const T & custom_value)
|
||||||
{
|
{
|
||||||
for (auto & [_, backup_entry] : backup_entries)
|
for (auto & [_, backup_entry] : backup_entries)
|
||||||
backup_entry = std::make_shared<BackupEntryWrappedWith<T>>(std::move(backup_entry), custom_value);
|
backup_entry = wrapBackupEntryWith(std::move(backup_entry), custom_value);
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
#include <Common/scope_guard_safe.h>
|
#include <Common/scope_guard_safe.h>
|
||||||
#include <Common/setThreadName.h>
|
#include <Common/setThreadName.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool.h>
|
||||||
#include <IO/HashingReadBuffer.h>
|
#include <base/hex.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -36,7 +36,7 @@ namespace
|
|||||||
{
|
{
|
||||||
/// We cannot reuse base backup because our file is smaller
|
/// We cannot reuse base backup because our file is smaller
|
||||||
/// than file stored in previous backup
|
/// than file stored in previous backup
|
||||||
if (new_entry_info.size < base_backup_info.first)
|
if ((new_entry_info.size < base_backup_info.first) || !base_backup_info.first)
|
||||||
return CheckBackupResult::HasNothing;
|
return CheckBackupResult::HasNothing;
|
||||||
|
|
||||||
if (base_backup_info.first == new_entry_info.size)
|
if (base_backup_info.first == new_entry_info.size)
|
||||||
@ -48,45 +48,22 @@ namespace
|
|||||||
|
|
||||||
struct ChecksumsForNewEntry
|
struct ChecksumsForNewEntry
|
||||||
{
|
{
|
||||||
UInt128 full_checksum;
|
/// 0 is the valid checksum of empty data.
|
||||||
UInt128 prefix_checksum;
|
UInt128 full_checksum = 0;
|
||||||
|
|
||||||
|
/// std::nullopt here means that it's too difficult to calculate a partial checksum so it shouldn't be used.
|
||||||
|
std::optional<UInt128> prefix_checksum;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Calculate checksum for backup entry if it's empty.
|
/// Calculate checksum for backup entry if it's empty.
|
||||||
/// Also able to calculate additional checksum of some prefix.
|
/// Also able to calculate additional checksum of some prefix.
|
||||||
ChecksumsForNewEntry calculateNewEntryChecksumsIfNeeded(const BackupEntryPtr & entry, size_t prefix_size)
|
ChecksumsForNewEntry calculateNewEntryChecksumsIfNeeded(const BackupEntryPtr & entry, size_t prefix_size)
|
||||||
{
|
{
|
||||||
if (prefix_size > 0)
|
ChecksumsForNewEntry res;
|
||||||
{
|
/// The partial checksum should be calculated before the full checksum to enable optimization in BackupEntryWithChecksumCalculation.
|
||||||
auto read_buffer = entry->getReadBuffer();
|
res.prefix_checksum = entry->getPartialChecksum(prefix_size);
|
||||||
HashingReadBuffer hashing_read_buffer(*read_buffer);
|
res.full_checksum = entry->getChecksum();
|
||||||
hashing_read_buffer.ignore(prefix_size);
|
return res;
|
||||||
auto prefix_checksum = hashing_read_buffer.getHash();
|
|
||||||
if (entry->getChecksum() == std::nullopt)
|
|
||||||
{
|
|
||||||
hashing_read_buffer.ignoreAll();
|
|
||||||
auto full_checksum = hashing_read_buffer.getHash();
|
|
||||||
return ChecksumsForNewEntry{full_checksum, prefix_checksum};
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return ChecksumsForNewEntry{*(entry->getChecksum()), prefix_checksum};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (entry->getChecksum() == std::nullopt)
|
|
||||||
{
|
|
||||||
auto read_buffer = entry->getReadBuffer();
|
|
||||||
HashingReadBuffer hashing_read_buffer(*read_buffer);
|
|
||||||
hashing_read_buffer.ignoreAll();
|
|
||||||
return ChecksumsForNewEntry{hashing_read_buffer.getHash(), 0};
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return ChecksumsForNewEntry{*(entry->getChecksum()), 0};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// We store entries' file names in the backup without leading slashes.
|
/// We store entries' file names in the backup without leading slashes.
|
||||||
@ -111,6 +88,7 @@ String BackupFileInfo::describe() const
|
|||||||
result += fmt::format("base_checksum: {};\n", getHexUIntLowercase(checksum));
|
result += fmt::format("base_checksum: {};\n", getHexUIntLowercase(checksum));
|
||||||
result += fmt::format("data_file_name: {};\n", data_file_name);
|
result += fmt::format("data_file_name: {};\n", data_file_name);
|
||||||
result += fmt::format("data_file_index: {};\n", data_file_index);
|
result += fmt::format("data_file_index: {};\n", data_file_index);
|
||||||
|
result += fmt::format("encrypted_by_disk: {};\n", encrypted_by_disk);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -122,6 +100,7 @@ BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const Backu
|
|||||||
BackupFileInfo info;
|
BackupFileInfo info;
|
||||||
info.file_name = adjusted_path;
|
info.file_name = adjusted_path;
|
||||||
info.size = backup_entry->getSize();
|
info.size = backup_entry->getSize();
|
||||||
|
info.encrypted_by_disk = backup_entry->isEncryptedByDisk();
|
||||||
|
|
||||||
/// We don't set `info.data_file_name` and `info.data_file_index` in this function because they're set during backup coordination
|
/// We don't set `info.data_file_name` and `info.data_file_index` in this function because they're set during backup coordination
|
||||||
/// (see the class BackupCoordinationFileInfos).
|
/// (see the class BackupCoordinationFileInfos).
|
||||||
@ -139,7 +118,7 @@ BackupFileInfo buildFileInfoForBackupEntry(const String & file_name, const Backu
|
|||||||
|
|
||||||
/// We have info about this file in base backup
|
/// We have info about this file in base backup
|
||||||
/// If file has no checksum -- calculate and fill it.
|
/// If file has no checksum -- calculate and fill it.
|
||||||
if (base_backup_file_info.has_value())
|
if (base_backup_file_info)
|
||||||
{
|
{
|
||||||
LOG_TRACE(log, "File {} found in base backup, checking for equality", adjusted_path);
|
LOG_TRACE(log, "File {} found in base backup, checking for equality", adjusted_path);
|
||||||
CheckBackupResult check_base = checkBaseBackupForFile(*base_backup_file_info, info);
|
CheckBackupResult check_base = checkBaseBackupForFile(*base_backup_file_info, info);
|
||||||
|
@ -35,6 +35,9 @@ struct BackupFileInfo
|
|||||||
/// This field is set during backup coordination (see the class BackupCoordinationFileInfos).
|
/// This field is set during backup coordination (see the class BackupCoordinationFileInfos).
|
||||||
size_t data_file_index = static_cast<size_t>(-1);
|
size_t data_file_index = static_cast<size_t>(-1);
|
||||||
|
|
||||||
|
/// Whether this file is encrypted by an encrypted disk.
|
||||||
|
bool encrypted_by_disk = false;
|
||||||
|
|
||||||
struct LessByFileName
|
struct LessByFileName
|
||||||
{
|
{
|
||||||
bool operator()(const BackupFileInfo & lhs, const BackupFileInfo & rhs) const { return (lhs.file_name < rhs.file_name); }
|
bool operator()(const BackupFileInfo & lhs, const BackupFileInfo & rhs) const { return (lhs.file_name < rhs.file_name); }
|
||||||
|
@ -1,46 +0,0 @@
|
|||||||
#include <Backups/BackupIO.h>
|
|
||||||
|
|
||||||
#include <IO/copyData.h>
|
|
||||||
#include <IO/WriteBufferFromFileBase.h>
|
|
||||||
#include <IO/SeekableReadBuffer.h>
|
|
||||||
#include <Interpreters/Context.h>
|
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
|
||||||
{
|
|
||||||
|
|
||||||
namespace ErrorCodes
|
|
||||||
{
|
|
||||||
extern const int NOT_IMPLEMENTED;
|
|
||||||
}
|
|
||||||
|
|
||||||
void IBackupReader::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
|
|
||||||
WriteMode write_mode, const WriteSettings & write_settings)
|
|
||||||
{
|
|
||||||
auto read_buffer = readFile(file_name);
|
|
||||||
auto write_buffer = destination_disk->writeFile(destination_path, std::min<size_t>(size, DBMS_DEFAULT_BUFFER_SIZE), write_mode, write_settings);
|
|
||||||
copyData(*read_buffer, *write_buffer, size);
|
|
||||||
write_buffer->finalize();
|
|
||||||
}
|
|
||||||
|
|
||||||
IBackupWriter::IBackupWriter(const ContextPtr & context_)
|
|
||||||
: read_settings(context_->getBackupReadSettings())
|
|
||||||
, has_throttling(static_cast<bool>(context_->getBackupsThrottler()))
|
|
||||||
{}
|
|
||||||
|
|
||||||
void IBackupWriter::copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name)
|
|
||||||
{
|
|
||||||
auto read_buffer = create_read_buffer();
|
|
||||||
if (offset)
|
|
||||||
read_buffer->seek(offset, SEEK_SET);
|
|
||||||
auto write_buffer = writeFile(dest_file_name);
|
|
||||||
copyData(*read_buffer, *write_buffer, size);
|
|
||||||
write_buffer->finalize();
|
|
||||||
}
|
|
||||||
|
|
||||||
void IBackupWriter::copyFileNative(
|
|
||||||
DiskPtr /* src_disk */, const String & /* src_file_name */, UInt64 /* src_offset */, UInt64 /* src_size */, const String & /* dest_file_name */)
|
|
||||||
{
|
|
||||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Native copy not implemented for backup writer");
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,58 +1,72 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Core/Types.h>
|
#include <Core/Types.h>
|
||||||
#include <Disks/DiskType.h>
|
|
||||||
#include <Disks/IDisk.h>
|
|
||||||
#include <IO/ReadSettings.h>
|
|
||||||
#include <Interpreters/Context_fwd.h>
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
class IDisk;
|
||||||
|
using DiskPtr = std::shared_ptr<IDisk>;
|
||||||
class SeekableReadBuffer;
|
class SeekableReadBuffer;
|
||||||
class WriteBuffer;
|
class WriteBuffer;
|
||||||
|
enum class WriteMode;
|
||||||
|
struct WriteSettings;
|
||||||
|
struct ReadSettings;
|
||||||
|
|
||||||
/// Represents operations of loading from disk or downloading for reading a backup.
|
/// Represents operations of loading from disk or downloading for reading a backup.
|
||||||
class IBackupReader /// BackupReaderFile, BackupReaderDisk
|
/// See also implementations: BackupReaderFile, BackupReaderDisk.
|
||||||
|
class IBackupReader
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
virtual ~IBackupReader() = default;
|
virtual ~IBackupReader() = default;
|
||||||
|
|
||||||
virtual bool fileExists(const String & file_name) = 0;
|
virtual bool fileExists(const String & file_name) = 0;
|
||||||
virtual UInt64 getFileSize(const String & file_name) = 0;
|
virtual UInt64 getFileSize(const String & file_name) = 0;
|
||||||
|
|
||||||
virtual std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) = 0;
|
virtual std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) = 0;
|
||||||
virtual void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
|
|
||||||
WriteMode write_mode, const WriteSettings & write_settings);
|
/// The function copyFileToDisk() can be much faster than reading the file with readFile() and then writing it to some disk.
|
||||||
virtual DataSourceDescription getDataSourceDescription() const = 0;
|
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
|
||||||
|
/// Parameters:
|
||||||
|
/// `encrypted_in_backup` specify if this file is encrypted in the backup, so it shouldn't be encrypted again while restoring to an encrypted disk.
|
||||||
|
virtual void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
|
||||||
|
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) = 0;
|
||||||
|
|
||||||
|
virtual const ReadSettings & getReadSettings() const = 0;
|
||||||
|
virtual const WriteSettings & getWriteSettings() const = 0;
|
||||||
|
virtual size_t getWriteBufferSize() const = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Represents operations of storing to disk or uploading for writing a backup.
|
/// Represents operations of storing to disk or uploading for writing a backup.
|
||||||
class IBackupWriter /// BackupWriterFile, BackupWriterDisk
|
/// See also implementations: BackupWriterFile, BackupWriterDisk
|
||||||
|
class IBackupWriter
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using CreateReadBufferFunction = std::function<std::unique_ptr<SeekableReadBuffer>()>;
|
|
||||||
|
|
||||||
explicit IBackupWriter(const ContextPtr & context_);
|
|
||||||
|
|
||||||
virtual ~IBackupWriter() = default;
|
virtual ~IBackupWriter() = default;
|
||||||
|
|
||||||
virtual bool fileExists(const String & file_name) = 0;
|
virtual bool fileExists(const String & file_name) = 0;
|
||||||
virtual UInt64 getFileSize(const String & file_name) = 0;
|
virtual UInt64 getFileSize(const String & file_name) = 0;
|
||||||
virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0;
|
virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0;
|
||||||
|
|
||||||
virtual std::unique_ptr<WriteBuffer> writeFile(const String & file_name) = 0;
|
virtual std::unique_ptr<WriteBuffer> writeFile(const String & file_name) = 0;
|
||||||
|
|
||||||
|
using CreateReadBufferFunction = std::function<std::unique_ptr<SeekableReadBuffer>()>;
|
||||||
|
virtual void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) = 0;
|
||||||
|
|
||||||
|
/// The function copyFileFromDisk() can be much faster than copyDataToFile()
|
||||||
|
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
|
||||||
|
/// Parameters:
|
||||||
|
/// `start_pos` and `length` specify a part of the file on `src_disk` to copy to the backup.
|
||||||
|
/// `copy_encrypted` specify whether this function should copy encrypted data of the file `src_path` to the backup.
|
||||||
|
virtual void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
|
||||||
|
bool copy_encrypted, UInt64 start_pos, UInt64 length) = 0;
|
||||||
|
|
||||||
virtual void removeFile(const String & file_name) = 0;
|
virtual void removeFile(const String & file_name) = 0;
|
||||||
virtual void removeFiles(const Strings & file_names) = 0;
|
virtual void removeFiles(const Strings & file_names) = 0;
|
||||||
virtual DataSourceDescription getDataSourceDescription() const = 0;
|
|
||||||
virtual void copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name);
|
|
||||||
virtual bool supportNativeCopy(DataSourceDescription /* data_source_description */) const { return false; }
|
|
||||||
|
|
||||||
/// Copy file using native copy (optimized for S3 to use CopyObject)
|
virtual const ReadSettings & getReadSettings() const = 0;
|
||||||
///
|
virtual const WriteSettings & getWriteSettings() const = 0;
|
||||||
/// NOTE: It still may fall back to copyDataToFile() if native copy is not possible:
|
virtual size_t getWriteBufferSize() const = 0;
|
||||||
/// - different buckets
|
|
||||||
/// - throttling had been requested
|
|
||||||
virtual void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name);
|
|
||||||
|
|
||||||
protected:
|
|
||||||
const ReadSettings read_settings;
|
|
||||||
const bool has_throttling;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
95
src/Backups/BackupIO_Default.cpp
Normal file
95
src/Backups/BackupIO_Default.cpp
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
#include <Backups/BackupIO_Default.h>
|
||||||
|
|
||||||
|
#include <Disks/IDisk.h>
|
||||||
|
#include <IO/copyData.h>
|
||||||
|
#include <IO/WriteBufferFromFileBase.h>
|
||||||
|
#include <IO/SeekableReadBuffer.h>
|
||||||
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Common/logger_useful.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
BackupReaderDefault::BackupReaderDefault(Poco::Logger * log_, const ContextPtr & context_)
|
||||||
|
: log(log_)
|
||||||
|
, read_settings(context_->getBackupReadSettings())
|
||||||
|
, write_settings(context_->getWriteSettings())
|
||||||
|
, write_buffer_size(DBMS_DEFAULT_BUFFER_SIZE)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
void BackupReaderDefault::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
|
||||||
|
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
|
||||||
|
{
|
||||||
|
LOG_TRACE(log, "Copying file {} to disk {} through buffers", path_in_backup, destination_disk->getName());
|
||||||
|
|
||||||
|
auto read_buffer = readFile(path_in_backup);
|
||||||
|
|
||||||
|
std::unique_ptr<WriteBuffer> write_buffer;
|
||||||
|
auto buf_size = std::min(file_size, write_buffer_size);
|
||||||
|
if (encrypted_in_backup)
|
||||||
|
write_buffer = destination_disk->writeEncryptedFile(destination_path, buf_size, write_mode, write_settings);
|
||||||
|
else
|
||||||
|
write_buffer = destination_disk->writeFile(destination_path, buf_size, write_mode, write_settings);
|
||||||
|
|
||||||
|
copyData(*read_buffer, *write_buffer, file_size);
|
||||||
|
write_buffer->finalize();
|
||||||
|
}
|
||||||
|
|
||||||
|
BackupWriterDefault::BackupWriterDefault(Poco::Logger * log_, const ContextPtr & context_)
|
||||||
|
: log(log_)
|
||||||
|
, read_settings(context_->getBackupReadSettings())
|
||||||
|
, write_settings(context_->getWriteSettings())
|
||||||
|
, write_buffer_size(DBMS_DEFAULT_BUFFER_SIZE)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
bool BackupWriterDefault::fileContentsEqual(const String & file_name, const String & expected_file_contents)
|
||||||
|
{
|
||||||
|
if (!fileExists(file_name))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
try
|
||||||
|
{
|
||||||
|
auto in = readFile(file_name, expected_file_contents.size());
|
||||||
|
String actual_file_contents(expected_file_contents.size(), ' ');
|
||||||
|
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
|
||||||
|
&& (actual_file_contents == expected_file_contents) && in->eof();
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void BackupWriterDefault::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
|
||||||
|
{
|
||||||
|
auto read_buffer = create_read_buffer();
|
||||||
|
|
||||||
|
if (start_pos)
|
||||||
|
read_buffer->seek(start_pos, SEEK_SET);
|
||||||
|
|
||||||
|
auto write_buffer = writeFile(path_in_backup);
|
||||||
|
|
||||||
|
copyData(*read_buffer, *write_buffer, length);
|
||||||
|
write_buffer->finalize();
|
||||||
|
}
|
||||||
|
|
||||||
|
void BackupWriterDefault::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
|
||||||
|
bool copy_encrypted, UInt64 start_pos, UInt64 length)
|
||||||
|
{
|
||||||
|
LOG_TRACE(log, "Copying file {} from disk {} through buffers", src_path, src_disk->getName());
|
||||||
|
|
||||||
|
auto create_read_buffer = [src_disk, src_path, copy_encrypted, settings = read_settings.adjustBufferSize(start_pos + length)]
|
||||||
|
{
|
||||||
|
if (copy_encrypted)
|
||||||
|
return src_disk->readEncryptedFile(src_path, settings);
|
||||||
|
else
|
||||||
|
return src_disk->readFile(src_path, settings);
|
||||||
|
};
|
||||||
|
|
||||||
|
copyDataToFile(path_in_backup, create_read_buffer, start_pos, length);
|
||||||
|
}
|
||||||
|
}
|
73
src/Backups/BackupIO_Default.h
Normal file
73
src/Backups/BackupIO_Default.h
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Backups/BackupIO.h>
|
||||||
|
#include <IO/ReadSettings.h>
|
||||||
|
#include <IO/WriteSettings.h>
|
||||||
|
#include <Interpreters/Context_fwd.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
class IDisk;
|
||||||
|
using DiskPtr = std::shared_ptr<IDisk>;
|
||||||
|
class ReadBuffer;
|
||||||
|
class SeekableReadBuffer;
|
||||||
|
class WriteBuffer;
|
||||||
|
enum class WriteMode;
|
||||||
|
|
||||||
|
/// Represents operations of loading from disk or downloading for reading a backup.
|
||||||
|
class BackupReaderDefault : public IBackupReader
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
BackupReaderDefault(Poco::Logger * log_, const ContextPtr & context_);
|
||||||
|
~BackupReaderDefault() override = default;
|
||||||
|
|
||||||
|
/// The function copyFileToDisk() can be much faster than reading the file with readFile() and then writing it to some disk.
|
||||||
|
/// (especially for S3 where it can use CopyObject to copy objects inside S3 instead of downloading and uploading them).
|
||||||
|
/// Parameters:
|
||||||
|
/// `encrypted_in_backup` specify if this file is encrypted in the backup, so it shouldn't be encrypted again while restoring to an encrypted disk.
|
||||||
|
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
|
||||||
|
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
|
||||||
|
|
||||||
|
const ReadSettings & getReadSettings() const override { return read_settings; }
|
||||||
|
const WriteSettings & getWriteSettings() const override { return write_settings; }
|
||||||
|
size_t getWriteBufferSize() const override { return write_buffer_size; }
|
||||||
|
|
||||||
|
protected:
|
||||||
|
Poco::Logger * const log;
|
||||||
|
const ReadSettings read_settings;
|
||||||
|
|
||||||
|
/// The write settings are used to write to the source disk in copyFileToDisk().
|
||||||
|
const WriteSettings write_settings;
|
||||||
|
const size_t write_buffer_size;
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Represents operations of storing to disk or uploading for writing a backup.
|
||||||
|
class BackupWriterDefault : public IBackupWriter
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
BackupWriterDefault(Poco::Logger * log_, const ContextPtr & context_);
|
||||||
|
~BackupWriterDefault() override = default;
|
||||||
|
|
||||||
|
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
||||||
|
void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) override;
|
||||||
|
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path, bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
|
||||||
|
|
||||||
|
const ReadSettings & getReadSettings() const override { return read_settings; }
|
||||||
|
const WriteSettings & getWriteSettings() const override { return write_settings; }
|
||||||
|
size_t getWriteBufferSize() const override { return write_buffer_size; }
|
||||||
|
|
||||||
|
protected:
|
||||||
|
/// Here readFile() is used only to implement fileContentsEqual().
|
||||||
|
virtual std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) = 0;
|
||||||
|
|
||||||
|
Poco::Logger * const log;
|
||||||
|
|
||||||
|
/// The read settings are used to read from the source disk in copyFileFromDisk().
|
||||||
|
const ReadSettings read_settings;
|
||||||
|
|
||||||
|
const WriteSettings write_settings;
|
||||||
|
const size_t write_buffer_size;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
@ -8,13 +8,11 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
namespace ErrorCodes
|
BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_)
|
||||||
{
|
: BackupReaderDefault(&Poco::Logger::get("BackupReaderDisk"), context_)
|
||||||
extern const int LOGICAL_ERROR;
|
, disk(disk_)
|
||||||
}
|
, root_path(root_path_)
|
||||||
|
, data_source_description(disk->getDataSourceDescription())
|
||||||
BackupReaderDisk::BackupReaderDisk(const DiskPtr & disk_, const String & path_)
|
|
||||||
: disk(disk_), path(path_), log(&Poco::Logger::get("BackupReaderDisk"))
|
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -22,38 +20,47 @@ BackupReaderDisk::~BackupReaderDisk() = default;
|
|||||||
|
|
||||||
bool BackupReaderDisk::fileExists(const String & file_name)
|
bool BackupReaderDisk::fileExists(const String & file_name)
|
||||||
{
|
{
|
||||||
return disk->exists(path / file_name);
|
return disk->exists(root_path / file_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
UInt64 BackupReaderDisk::getFileSize(const String & file_name)
|
UInt64 BackupReaderDisk::getFileSize(const String & file_name)
|
||||||
{
|
{
|
||||||
return disk->getFileSize(path / file_name);
|
return disk->getFileSize(root_path / file_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<SeekableReadBuffer> BackupReaderDisk::readFile(const String & file_name)
|
std::unique_ptr<SeekableReadBuffer> BackupReaderDisk::readFile(const String & file_name)
|
||||||
{
|
{
|
||||||
return disk->readFile(path / file_name);
|
return disk->readFile(root_path / file_name, read_settings);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BackupReaderDisk::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
|
void BackupReaderDisk::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
|
||||||
WriteMode write_mode, const WriteSettings & write_settings)
|
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
|
||||||
{
|
{
|
||||||
if (write_mode == WriteMode::Rewrite)
|
/// Use IDisk::copyFile() as a more optimal way to copy a file if it's possible.
|
||||||
|
/// However IDisk::copyFile() can't use throttling for reading, and can't copy an encrypted file or do appending.
|
||||||
|
bool has_throttling = disk->isRemote() ? static_cast<bool>(read_settings.remote_throttler) : static_cast<bool>(read_settings.local_throttler);
|
||||||
|
if (!has_throttling && (write_mode == WriteMode::Rewrite) && !encrypted_in_backup)
|
||||||
{
|
{
|
||||||
LOG_TRACE(log, "Copying {}/{} from disk {} to {} by the disk", path, file_name, disk->getName(), destination_disk->getName());
|
auto destination_data_source_description = destination_disk->getDataSourceDescription();
|
||||||
disk->copyFile(path / file_name, *destination_disk, destination_path, write_settings);
|
if (destination_data_source_description.sameKind(data_source_description) && !data_source_description.is_encrypted)
|
||||||
return;
|
{
|
||||||
|
/// Use more optimal way.
|
||||||
|
LOG_TRACE(log, "Copying file {} from disk {} to disk {}", path_in_backup, disk->getName(), destination_disk->getName());
|
||||||
|
disk->copyFile(root_path / path_in_backup, *destination_disk, destination_path, write_settings);
|
||||||
|
return; /// copied!
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_TRACE(log, "Copying {}/{} from disk {} to {} through buffers", path, file_name, disk->getName(), destination_disk->getName());
|
/// Fallback to copy through buffers.
|
||||||
IBackupReader::copyFileToDisk(file_name, size, destination_disk, destination_path, write_mode, write_settings);
|
BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
BackupWriterDisk::BackupWriterDisk(const DiskPtr & disk_, const String & path_, const ContextPtr & context_)
|
BackupWriterDisk::BackupWriterDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_)
|
||||||
: IBackupWriter(context_)
|
: BackupWriterDefault(&Poco::Logger::get("BackupWriterDisk"), context_)
|
||||||
, disk(disk_)
|
, disk(disk_)
|
||||||
, path(path_)
|
, root_path(root_path_)
|
||||||
|
, data_source_description(disk->getDataSourceDescription())
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -61,85 +68,64 @@ BackupWriterDisk::~BackupWriterDisk() = default;
|
|||||||
|
|
||||||
bool BackupWriterDisk::fileExists(const String & file_name)
|
bool BackupWriterDisk::fileExists(const String & file_name)
|
||||||
{
|
{
|
||||||
return disk->exists(path / file_name);
|
return disk->exists(root_path / file_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
UInt64 BackupWriterDisk::getFileSize(const String & file_name)
|
UInt64 BackupWriterDisk::getFileSize(const String & file_name)
|
||||||
{
|
{
|
||||||
return disk->getFileSize(path / file_name);
|
return disk->getFileSize(root_path / file_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool BackupWriterDisk::fileContentsEqual(const String & file_name, const String & expected_file_contents)
|
std::unique_ptr<ReadBuffer> BackupWriterDisk::readFile(const String & file_name, size_t expected_file_size)
|
||||||
{
|
{
|
||||||
if (!disk->exists(path / file_name))
|
return disk->readFile(root_path / file_name, read_settings.adjustBufferSize(expected_file_size));
|
||||||
return false;
|
|
||||||
|
|
||||||
try
|
|
||||||
{
|
|
||||||
auto in = disk->readFile(path / file_name);
|
|
||||||
String actual_file_contents(expected_file_contents.size(), ' ');
|
|
||||||
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
|
|
||||||
&& (actual_file_contents == expected_file_contents) && in->eof();
|
|
||||||
}
|
|
||||||
catch (...)
|
|
||||||
{
|
|
||||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unique_ptr<WriteBuffer> BackupWriterDisk::writeFile(const String & file_name)
|
std::unique_ptr<WriteBuffer> BackupWriterDisk::writeFile(const String & file_name)
|
||||||
{
|
{
|
||||||
auto file_path = path / file_name;
|
auto file_path = root_path / file_name;
|
||||||
disk->createDirectories(file_path.parent_path());
|
disk->createDirectories(file_path.parent_path());
|
||||||
return disk->writeFile(file_path);
|
return disk->writeFile(file_path, write_buffer_size, WriteMode::Rewrite, write_settings);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BackupWriterDisk::removeFile(const String & file_name)
|
void BackupWriterDisk::removeFile(const String & file_name)
|
||||||
{
|
{
|
||||||
disk->removeFileIfExists(path / file_name);
|
disk->removeFileIfExists(root_path / file_name);
|
||||||
if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
|
if (disk->isDirectory(root_path) && disk->isDirectoryEmpty(root_path))
|
||||||
disk->removeDirectory(path);
|
disk->removeDirectory(root_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void BackupWriterDisk::removeFiles(const Strings & file_names)
|
void BackupWriterDisk::removeFiles(const Strings & file_names)
|
||||||
{
|
{
|
||||||
for (const auto & file_name : file_names)
|
for (const auto & file_name : file_names)
|
||||||
disk->removeFileIfExists(path / file_name);
|
disk->removeFileIfExists(root_path / file_name);
|
||||||
if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
|
if (disk->isDirectory(root_path) && disk->isDirectoryEmpty(root_path))
|
||||||
disk->removeDirectory(path);
|
disk->removeDirectory(root_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
DataSourceDescription BackupWriterDisk::getDataSourceDescription() const
|
void BackupWriterDisk::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
|
||||||
|
bool copy_encrypted, UInt64 start_pos, UInt64 length)
|
||||||
{
|
{
|
||||||
return disk->getDataSourceDescription();
|
/// Use IDisk::copyFile() as a more optimal way to copy a file if it's possible.
|
||||||
}
|
/// However IDisk::copyFile() can't use throttling for reading, and can't copy an encrypted file or copy a part of the file.
|
||||||
|
bool has_throttling = src_disk->isRemote() ? static_cast<bool>(read_settings.remote_throttler) : static_cast<bool>(read_settings.local_throttler);
|
||||||
DataSourceDescription BackupReaderDisk::getDataSourceDescription() const
|
if (!has_throttling && !start_pos && !copy_encrypted)
|
||||||
{
|
|
||||||
return disk->getDataSourceDescription();
|
|
||||||
}
|
|
||||||
|
|
||||||
bool BackupWriterDisk::supportNativeCopy(DataSourceDescription data_source_description) const
|
|
||||||
{
|
|
||||||
return data_source_description == disk->getDataSourceDescription();
|
|
||||||
}
|
|
||||||
|
|
||||||
void BackupWriterDisk::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
|
|
||||||
{
|
|
||||||
if (!src_disk)
|
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk");
|
|
||||||
|
|
||||||
if (has_throttling || (src_offset != 0) || (src_size != src_disk->getFileSize(src_file_name)))
|
|
||||||
{
|
{
|
||||||
auto create_read_buffer = [this, src_disk, src_file_name] { return src_disk->readFile(src_file_name, read_settings); };
|
auto source_data_source_description = src_disk->getDataSourceDescription();
|
||||||
copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
|
if (source_data_source_description.sameKind(data_source_description) && !source_data_source_description.is_encrypted
|
||||||
return;
|
&& (length == src_disk->getFileSize(src_path)))
|
||||||
|
{
|
||||||
|
/// Use more optimal way.
|
||||||
|
LOG_TRACE(log, "Copying file {} from disk {} to disk {}", src_path, src_disk->getName(), disk->getName());
|
||||||
|
auto dest_file_path = root_path / path_in_backup;
|
||||||
|
disk->createDirectories(dest_file_path.parent_path());
|
||||||
|
src_disk->copyFile(src_path, *disk, dest_file_path, write_settings);
|
||||||
|
return; /// copied!
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
auto file_path = path / dest_file_name;
|
/// Fallback to copy through buffers.
|
||||||
disk->createDirectories(file_path.parent_path());
|
BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
|
||||||
src_disk->copyFile(src_file_name, *disk, file_path);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1,53 +1,58 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <Backups/BackupIO_Default.h>
|
||||||
|
#include <Disks/DiskType.h>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <Backups/BackupIO.h>
|
|
||||||
#include <Interpreters/Context_fwd.h>
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
class IDisk;
|
class IDisk;
|
||||||
using DiskPtr = std::shared_ptr<IDisk>;
|
using DiskPtr = std::shared_ptr<IDisk>;
|
||||||
|
|
||||||
class BackupReaderDisk : public IBackupReader
|
class BackupReaderDisk : public BackupReaderDefault
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
BackupReaderDisk(const DiskPtr & disk_, const String & path_);
|
BackupReaderDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_);
|
||||||
~BackupReaderDisk() override;
|
~BackupReaderDisk() override;
|
||||||
|
|
||||||
bool fileExists(const String & file_name) override;
|
bool fileExists(const String & file_name) override;
|
||||||
UInt64 getFileSize(const String & file_name) override;
|
UInt64 getFileSize(const String & file_name) override;
|
||||||
|
|
||||||
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
|
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
|
||||||
void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
|
|
||||||
WriteMode write_mode, const WriteSettings & write_settings) override;
|
void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
|
||||||
DataSourceDescription getDataSourceDescription() const override;
|
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
DiskPtr disk;
|
const DiskPtr disk;
|
||||||
std::filesystem::path path;
|
const std::filesystem::path root_path;
|
||||||
Poco::Logger * log;
|
const DataSourceDescription data_source_description;
|
||||||
};
|
};
|
||||||
|
|
||||||
class BackupWriterDisk : public IBackupWriter
|
class BackupWriterDisk : public BackupWriterDefault
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
BackupWriterDisk(const DiskPtr & disk_, const String & path_, const ContextPtr & context_);
|
BackupWriterDisk(const DiskPtr & disk_, const String & root_path_, const ContextPtr & context_);
|
||||||
~BackupWriterDisk() override;
|
~BackupWriterDisk() override;
|
||||||
|
|
||||||
bool fileExists(const String & file_name) override;
|
bool fileExists(const String & file_name) override;
|
||||||
UInt64 getFileSize(const String & file_name) override;
|
UInt64 getFileSize(const String & file_name) override;
|
||||||
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
|
||||||
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
||||||
|
|
||||||
|
void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
|
||||||
|
bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
|
||||||
|
|
||||||
void removeFile(const String & file_name) override;
|
void removeFile(const String & file_name) override;
|
||||||
void removeFiles(const Strings & file_names) override;
|
void removeFiles(const Strings & file_names) override;
|
||||||
DataSourceDescription getDataSourceDescription() const override;
|
|
||||||
|
|
||||||
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
|
|
||||||
void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
DiskPtr disk;
|
std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
|
||||||
std::filesystem::path path;
|
|
||||||
|
const DiskPtr disk;
|
||||||
|
const std::filesystem::path root_path;
|
||||||
|
const DataSourceDescription data_source_description;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@@ -1,9 +1,7 @@
 #include <Backups/BackupIO_File.h>
-#include <Disks/IDisk.h>
+#include <Disks/DiskLocal.h>
 #include <Disks/IO/createReadBufferFromFileBase.h>
 #include <IO/WriteBufferFromFile.h>
-#include <IO/copyData.h>
-#include <Common/filesystemHelpers.h>
 #include <Common/logger_useful.h>

@@ -12,158 +10,146 @@ namespace fs = std::filesystem;

 namespace DB
 {
-BackupReaderFile::BackupReaderFile(const String & path_) : path(path_), log(&Poco::Logger::get("BackupReaderFile"))
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+BackupReaderFile::BackupReaderFile(const String & root_path_, const ContextPtr & context_)
+    : BackupReaderDefault(&Poco::Logger::get("BackupReaderFile"), context_)
+    , root_path(root_path_)
+    , data_source_description(DiskLocal::getLocalDataSourceDescription(root_path))
 {
 }

-BackupReaderFile::~BackupReaderFile() = default;

 bool BackupReaderFile::fileExists(const String & file_name)
 {
-    return fs::exists(path / file_name);
+    return fs::exists(root_path / file_name);
 }

 UInt64 BackupReaderFile::getFileSize(const String & file_name)
 {
-    return fs::file_size(path / file_name);
+    return fs::file_size(root_path / file_name);
 }

 std::unique_ptr<SeekableReadBuffer> BackupReaderFile::readFile(const String & file_name)
 {
-    return createReadBufferFromFileBase(path / file_name, {});
+    return createReadBufferFromFileBase(root_path / file_name, read_settings);
 }

-void BackupReaderFile::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
-                                      WriteMode write_mode, const WriteSettings & write_settings)
+void BackupReaderFile::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                                      DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
 {
-    if (destination_disk->getDataSourceDescription() == getDataSourceDescription())
+    /// std::filesystem::copy() can copy from the filesystem only, and can't do throttling or appending.
+    bool has_throttling = static_cast<bool>(read_settings.local_throttler);
+    if (!has_throttling && (write_mode == WriteMode::Rewrite))
     {
-        /// Use more optimal way.
-        LOG_TRACE(log, "Copying {}/{} to disk {} locally", path, file_name, destination_disk->getName());
-        fs::copy(path / file_name, fullPath(destination_disk, destination_path), fs::copy_options::overwrite_existing);
-        return;
+        auto destination_data_source_description = destination_disk->getDataSourceDescription();
+        if (destination_data_source_description.sameKind(data_source_description)
+            && (destination_data_source_description.is_encrypted == encrypted_in_backup))
+        {
+            /// Use more optimal way.
+            LOG_TRACE(log, "Copying file {} to disk {} locally", path_in_backup, destination_disk->getName());
+
+            auto write_blob_function = [abs_source_path = root_path / path_in_backup, file_size](
+                const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
+            {
+                /// For local disks the size of a blob path is expected to be 1.
+                if (blob_path.size() != 1 || mode != WriteMode::Rewrite)
+                    throw Exception(ErrorCodes::LOGICAL_ERROR,
+                                    "Blob writing function called with unexpected blob_path.size={} or mode={}",
+                                    blob_path.size(), mode);
+                fs::copy(abs_source_path, blob_path.at(0), fs::copy_options::overwrite_existing);
+                return file_size;
+            };
+
+            destination_disk->writeFileUsingBlobWritingFunction(destination_path, write_mode, write_blob_function);
+            return; /// copied!
+        }
     }

-    LOG_TRACE(log, "Copying {}/{} to disk {} through buffers", path, file_name, destination_disk->getName());
-    IBackupReader::copyFileToDisk(path / file_name, size, destination_disk, destination_path, write_mode, write_settings);
+    /// Fallback to copy through buffers.
+    BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
 }


-BackupWriterFile::BackupWriterFile(const String & path_, const ContextPtr & context_)
-    : IBackupWriter(context_)
-    , path(path_)
+BackupWriterFile::BackupWriterFile(const String & root_path_, const ContextPtr & context_)
+    : BackupWriterDefault(&Poco::Logger::get("BackupWriterFile"), context_)
+    , root_path(root_path_)
+    , data_source_description(DiskLocal::getLocalDataSourceDescription(root_path))
 {
 }

-BackupWriterFile::~BackupWriterFile() = default;

 bool BackupWriterFile::fileExists(const String & file_name)
 {
-    return fs::exists(path / file_name);
+    return fs::exists(root_path / file_name);
 }

 UInt64 BackupWriterFile::getFileSize(const String & file_name)
 {
-    return fs::file_size(path / file_name);
+    return fs::file_size(root_path / file_name);
 }

-bool BackupWriterFile::fileContentsEqual(const String & file_name, const String & expected_file_contents)
+std::unique_ptr<ReadBuffer> BackupWriterFile::readFile(const String & file_name, size_t expected_file_size)
 {
-    if (!fs::exists(path / file_name))
-        return false;
-
-    try
-    {
-        auto in = createReadBufferFromFileBase(path / file_name, {});
-        String actual_file_contents(expected_file_contents.size(), ' ');
-        return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
-            && (actual_file_contents == expected_file_contents) && in->eof();
-    }
-    catch (...)
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__);
-        return false;
-    }
+    return createReadBufferFromFileBase(root_path / file_name, read_settings.adjustBufferSize(expected_file_size));
 }

 std::unique_ptr<WriteBuffer> BackupWriterFile::writeFile(const String & file_name)
 {
-    auto file_path = path / file_name;
+    auto file_path = root_path / file_name;
     fs::create_directories(file_path.parent_path());
-    return std::make_unique<WriteBufferFromFile>(file_path);
+    return std::make_unique<WriteBufferFromFile>(file_path, write_buffer_size, -1, write_settings.local_throttler);
 }

 void BackupWriterFile::removeFile(const String & file_name)
 {
-    fs::remove(path / file_name);
-    if (fs::is_directory(path) && fs::is_empty(path))
-        fs::remove(path);
+    fs::remove(root_path / file_name);
+    if (fs::is_directory(root_path) && fs::is_empty(root_path))
+        fs::remove(root_path);
 }

 void BackupWriterFile::removeFiles(const Strings & file_names)
 {
     for (const auto & file_name : file_names)
-        fs::remove(path / file_name);
-    if (fs::is_directory(path) && fs::is_empty(path))
-        fs::remove(path);
+        fs::remove(root_path / file_name);
+    if (fs::is_directory(root_path) && fs::is_empty(root_path))
+        fs::remove(root_path);
 }

-DataSourceDescription BackupWriterFile::getDataSourceDescription() const
-{
-    DataSourceDescription data_source_description;
-    data_source_description.type = DataSourceType::Local;
-
-    if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value())
-        data_source_description.description = *block_device_id;
-    else
-        data_source_description.description = path;
-    data_source_description.is_encrypted = false;
-    data_source_description.is_cached = false;
-
-    return data_source_description;
-}
-
-DataSourceDescription BackupReaderFile::getDataSourceDescription() const
-{
-    DataSourceDescription data_source_description;
-
-    data_source_description.type = DataSourceType::Local;
-
-    if (auto block_device_id = tryGetBlockDeviceId(path); block_device_id.has_value())
-        data_source_description.description = *block_device_id;
-    else
-        data_source_description.description = path;
-    data_source_description.is_encrypted = false;
-    data_source_description.is_cached = false;
-
-    return data_source_description;
-}
-
-bool BackupWriterFile::supportNativeCopy(DataSourceDescription data_source_description) const
-{
-    return data_source_description == getDataSourceDescription();
-}
-
-void BackupWriterFile::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
-{
-    std::string abs_source_path;
-    if (src_disk)
-        abs_source_path = fullPath(src_disk, src_file_name);
-    else
-        abs_source_path = fs::absolute(src_file_name);
-
-    if (has_throttling || (src_offset != 0) || (src_size != fs::file_size(abs_source_path)))
+void BackupWriterFile::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                                        bool copy_encrypted, UInt64 start_pos, UInt64 length)
+{
+    /// std::filesystem::copy() can copy from the filesystem only, and can't do throttling or copy a part of the file.
+    bool has_throttling = static_cast<bool>(read_settings.local_throttler);
+    if (!has_throttling)
     {
-        auto create_read_buffer = [this, abs_source_path] { return createReadBufferFromFileBase(abs_source_path, read_settings); };
-        copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
-        return;
+        auto source_data_source_description = src_disk->getDataSourceDescription();
+        if (source_data_source_description.sameKind(data_source_description)
+            && (source_data_source_description.is_encrypted == copy_encrypted))
+        {
+            /// std::filesystem::copy() can copy from a single file only.
+            if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 1)
+            {
+                auto abs_source_path = blob_path[0];
+
+                /// std::filesystem::copy() can copy a file as a whole only.
+                if ((start_pos == 0) && (length == fs::file_size(abs_source_path)))
+                {
+                    /// Use more optimal way.
+                    LOG_TRACE(log, "Copying file {} from disk {} locally", src_path, src_disk->getName());
+                    auto abs_dest_path = root_path / path_in_backup;
+                    fs::create_directories(abs_dest_path.parent_path());
+                    fs::copy(abs_source_path, abs_dest_path, fs::copy_options::overwrite_existing);
+                    return; /// copied!
+                }
+            }
+        }
     }

-    auto file_path = path / dest_file_name;
-    fs::create_directories(file_path.parent_path());
-    fs::copy(abs_source_path, file_path, fs::copy_options::overwrite_existing);
+    /// Fallback to copy through buffers.
+    BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
 }

 }
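Note: the local fast path above no longer calls fs::copy() straight onto a resolved destination path; it asks the destination disk to drive the write through writeFileUsingBlobWritingFunction() and does the actual copy inside a callback, so the disk stays in charge of where the blob lands. Below is a minimal standalone sketch of that callback pattern; PlainLocalDisk, WriteBlobFunction and copyLocally are hypothetical stand-ins for illustration, not ClickHouse APIs.

// Sketch only: the blob-writing-function pattern, reduced to plain standard C++.
#include <filesystem>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

namespace fs = std::filesystem;

enum class WriteMode { Rewrite, Append };
using WriteBlobFunction = std::function<size_t(const std::vector<std::string> & blob_path, WriteMode mode)>;

/// A toy "disk" that stores each file as exactly one local blob.
struct PlainLocalDisk
{
    fs::path root;

    /// The disk decides where the blob lives; the caller only supplies the bytes via the callback.
    size_t writeFileUsingBlobWritingFunction(const std::string & path, WriteMode mode, const WriteBlobFunction & write_blob)
    {
        fs::path blob = root / path;
        fs::create_directories(blob.parent_path());
        return write_blob({blob.string()}, mode);
    }
};

size_t copyLocally(const fs::path & abs_source_path, PlainLocalDisk & dest, const std::string & dest_path)
{
    auto file_size = fs::file_size(abs_source_path);
    auto write_blob = [&](const std::vector<std::string> & blob_path, WriteMode mode) -> size_t
    {
        /// For local disks a blob path is expected to contain exactly one element.
        if (blob_path.size() != 1 || mode != WriteMode::Rewrite)
            throw std::logic_error("unexpected blob path or write mode");
        fs::copy(abs_source_path, blob_path.front(), fs::copy_options::overwrite_existing);
        return static_cast<size_t>(file_size);
    };
    return dest.writeFileUsingBlobWritingFunction(dest_path, WriteMode::Rewrite, write_blob);
}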
@@ -1,48 +1,51 @@
 #pragma once

+#include <Backups/BackupIO_Default.h>
+#include <Disks/DiskType.h>
 #include <filesystem>
-#include <Backups/BackupIO.h>
-#include <Interpreters/Context_fwd.h>

 namespace DB
 {

-class BackupReaderFile : public IBackupReader
+class BackupReaderFile : public BackupReaderDefault
 {
 public:
-    explicit BackupReaderFile(const String & path_);
-    ~BackupReaderFile() override;
+    explicit BackupReaderFile(const String & root_path_, const ContextPtr & context_);

     bool fileExists(const String & file_name) override;
     UInt64 getFileSize(const String & file_name) override;

     std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
-    void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
-                        WriteMode write_mode, const WriteSettings & write_settings) override;
-    DataSourceDescription getDataSourceDescription() const override;
+    void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                        DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;

 private:
-    std::filesystem::path path;
-    Poco::Logger * log;
+    const std::filesystem::path root_path;
+    const DataSourceDescription data_source_description;
 };

-class BackupWriterFile : public IBackupWriter
+class BackupWriterFile : public BackupWriterDefault
 {
 public:
-    explicit BackupWriterFile(const String & path_, const ContextPtr & context_);
-    ~BackupWriterFile() override;
+    BackupWriterFile(const String & root_path_, const ContextPtr & context_);

     bool fileExists(const String & file_name) override;
     UInt64 getFileSize(const String & file_name) override;
-    bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
     std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;

+    void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                          bool copy_encrypted, UInt64 start_pos, UInt64 length) override;

     void removeFile(const String & file_name) override;
     void removeFiles(const Strings & file_names) override;
-    DataSourceDescription getDataSourceDescription() const override;
-    bool supportNativeCopy(DataSourceDescription data_source_description) const override;
-    void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;

 private:
-    std::filesystem::path path;
+    std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
+
+    const std::filesystem::path root_path;
+    const DataSourceDescription data_source_description;
 };

 }
@@ -2,7 +2,6 @@

 #if USE_AWS_S3
 #include <Common/quoteString.h>
-#include <Disks/ObjectStorages/S3/copyS3FileToDisk.h>
 #include <Interpreters/threadPoolCallbackRunner.h>
 #include <Interpreters/Context.h>
 #include <IO/SharedThreadPools.h>
@@ -12,6 +11,7 @@
 #include <IO/S3/copyS3File.h>
 #include <IO/S3/Client.h>
 #include <IO/S3/Credentials.h>
+#include <Disks/IDisk.h>

 #include <Poco/Util/AbstractConfiguration.h>

@@ -102,21 +102,15 @@ namespace

 BackupReaderS3::BackupReaderS3(
     const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
-    : s3_uri(s3_uri_)
+    : BackupReaderDefault(&Poco::Logger::get("BackupReaderS3"), context_)
+    , s3_uri(s3_uri_)
     , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
-    , read_settings(context_->getReadSettings())
     , request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
-    , log(&Poco::Logger::get("BackupReaderS3"))
+    , data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
 {
     request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
 }

-DataSourceDescription BackupReaderS3::getDataSourceDescription() const
-{
-    return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
-}
-
 BackupReaderS3::~BackupReaderS3() = default;

 bool BackupReaderS3::fileExists(const String & file_name)
@@ -138,75 +132,98 @@ std::unique_ptr<SeekableReadBuffer> BackupReaderS3::readFile(const String & file
         client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings);
 }

-void BackupReaderS3::copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
-                                    WriteMode write_mode, const WriteSettings & write_settings)
+void BackupReaderS3::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                                    DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
 {
-    LOG_TRACE(log, "Copying {} to disk {}", file_name, destination_disk->getName());
-
-    copyS3FileToDisk(
-        client,
-        s3_uri.bucket,
-        fs::path(s3_uri.key) / file_name,
-        s3_uri.version_id,
-        0,
-        size,
-        destination_disk,
-        destination_path,
-        write_mode,
-        read_settings,
-        write_settings,
-        request_settings,
-        threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupReaderS3"));
+    /// Use the native copy as a more optimal way to copy a file from S3 to S3 if it's possible.
+    /// We don't check for `has_throttling` here because the native copy almost doesn't use network.
+    auto destination_data_source_description = destination_disk->getDataSourceDescription();
+    if (destination_data_source_description.sameKind(data_source_description)
+        && (destination_data_source_description.is_encrypted == encrypted_in_backup))
+    {
+        /// Use native copy, the more optimal way.
+        LOG_TRACE(log, "Copying {} from S3 to disk {} using native copy", path_in_backup, destination_disk->getName());
+        auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> & object_attributes) -> size_t
+        {
+            /// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
+            if (blob_path.size() != 2 || mode != WriteMode::Rewrite)
+                throw Exception(ErrorCodes::LOGICAL_ERROR,
+                                "Blob writing function called with unexpected blob_path.size={} or mode={}",
+                                blob_path.size(), mode);
+
+            copyS3File(
+                client,
+                s3_uri.bucket,
+                fs::path(s3_uri.key) / path_in_backup,
+                0,
+                file_size,
+                /* dest_bucket= */ blob_path[1],
+                /* dest_key= */ blob_path[0],
+                request_settings,
+                object_attributes,
+                threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupReaderS3"),
+                /* for_disk_s3= */ true);
+            return file_size;
+        };
+
+        destination_disk->writeFileUsingBlobWritingFunction(destination_path, write_mode, write_blob_function);
+        return; /// copied!
+    }
+
+    /// Fallback to copy through buffers.
+    BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
 }


 BackupWriterS3::BackupWriterS3(
     const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
-    : IBackupWriter(context_)
+    : BackupWriterDefault(&Poco::Logger::get("BackupWriterS3"), context_)
     , s3_uri(s3_uri_)
     , client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
     , request_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).request_settings)
-    , log(&Poco::Logger::get("BackupWriterS3"))
+    , data_source_description{DataSourceType::S3, s3_uri.endpoint, false, false}
 {
     request_settings.updateFromSettings(context_->getSettingsRef());
     request_settings.max_single_read_retries = context_->getSettingsRef().s3_max_single_read_retries; // FIXME: Avoid taking value for endpoint
 }

-DataSourceDescription BackupWriterS3::getDataSourceDescription() const
-{
-    return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
-}
-
-bool BackupWriterS3::supportNativeCopy(DataSourceDescription data_source_description) const
-{
-    return getDataSourceDescription() == data_source_description;
-}
-
-void BackupWriterS3::copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name)
-{
-    if (!src_disk)
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk");
-
-    auto objects = src_disk->getStorageObjects(src_file_name);
-    if (objects.size() > 1)
-    {
-        auto create_read_buffer = [this, src_disk, src_file_name] { return src_disk->readFile(src_file_name, read_settings); };
-        copyDataToFile(create_read_buffer, src_offset, src_size, dest_file_name);
-    }
-    else
-    {
-        auto object_storage = src_disk->getObjectStorage();
-        std::string src_bucket = object_storage->getObjectsNamespace();
-        auto file_path = fs::path(s3_uri.key) / dest_file_name;
-        copyS3File(client, src_bucket, objects[0].remote_path, src_offset, src_size, s3_uri.bucket, file_path, request_settings, {},
-                   threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
-    }
-}
+void BackupWriterS3::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                                      bool copy_encrypted, UInt64 start_pos, UInt64 length)
+{
+    /// Use the native copy as a more optimal way to copy a file from S3 to S3 if it's possible.
+    /// We don't check for `has_throttling` here because the native copy almost doesn't use network.
+    auto source_data_source_description = src_disk->getDataSourceDescription();
+    if (source_data_source_description.sameKind(data_source_description) && (source_data_source_description.is_encrypted == copy_encrypted))
+    {
+        /// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in S3 bucket.
+        /// In this case we can't use the native copy.
+        if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
+        {
+            /// Use native copy, the more optimal way.
+            LOG_TRACE(log, "Copying file {} from disk {} to S3 using native copy", src_path, src_disk->getName());
+            copyS3File(
+                client,
+                /* src_bucket */ blob_path[1],
+                /* src_key= */ blob_path[0],
+                start_pos,
+                length,
+                s3_uri.bucket,
+                fs::path(s3_uri.key) / path_in_backup,
+                request_settings,
+                {},
+                threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
+            return; /// copied!
+        }
+    }
+
+    /// Fallback to copy through buffers.
+    BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
+}

-void BackupWriterS3::copyDataToFile(
-    const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name)
+void BackupWriterS3::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
 {
-    copyDataToS3File(create_read_buffer, offset, size, client, s3_uri.bucket, fs::path(s3_uri.key) / dest_file_name, request_settings, {},
+    copyDataToS3File(create_read_buffer, start_pos, length, client, s3_uri.bucket, fs::path(s3_uri.key) / path_in_backup, request_settings, {},
                      threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
 }

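Note: both S3 paths above decide between a server-side copy and a buffered fallback in the same way: the backup's data source and the disk's data source must be of the same kind, the encryption flags must match, and the disk must report the file as a single S3 object, i.e. a two-element blob path read here as {key, bucket}. A compact sketch of just that decision follows; the types are simplified stand-ins, not the real ClickHouse classes.

// Sketch only: the precondition for attempting a native (server-side) S3 copy.
#include <string>
#include <vector>

struct DataSourceDescriptionSketch
{
    std::string type;            // e.g. "S3" or "Local"
    bool is_encrypted = false;
    bool sameKind(const DataSourceDescriptionSketch & other) const { return type == other.type; }
};

/// A blob path of exactly two elements is assumed to mean {object key, bucket},
/// i.e. the file lives in S3 as one object that the service can copy by itself.
bool canUseNativeS3Copy(const DataSourceDescriptionSketch & backup_source,
                        const DataSourceDescriptionSketch & disk_source,
                        bool copy_encrypted,
                        const std::vector<std::string> & blob_path)
{
    return disk_source.sameKind(backup_source)
        && disk_source.is_encrypted == copy_encrypted
        && blob_path.size() == 2;
}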
@@ -225,24 +242,11 @@ UInt64 BackupWriterS3::getFileSize(const String & file_name)
     return objects[0].GetSize();
 }

-bool BackupWriterS3::fileContentsEqual(const String & file_name, const String & expected_file_contents)
+std::unique_ptr<ReadBuffer> BackupWriterS3::readFile(const String & file_name, size_t expected_file_size)
 {
-    if (listObjects(*client, s3_uri, file_name).empty())
-        return false;
-
-    try
-    {
-        auto in = std::make_unique<ReadBufferFromS3>(
-            client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings);
-        String actual_file_contents(expected_file_contents.size(), ' ');
-        return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
-            && (actual_file_contents == expected_file_contents) && in->eof();
-    }
-    catch (...)
-    {
-        tryLogCurrentException(__PRETTY_FUNCTION__);
-        return false;
-    }
+    return std::make_unique<ReadBufferFromS3>(
+        client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, request_settings, read_settings,
+        false, 0, 0, false, expected_file_size);
 }

 std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
@@ -251,9 +255,11 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
         client,
         s3_uri.bucket,
         fs::path(s3_uri.key) / file_name,
+        DBMS_DEFAULT_BUFFER_SIZE,
         request_settings,
         std::nullopt,
-        threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"));
+        threadPoolCallbackRunner<void>(BackupsIOThreadPool::get(), "BackupWriterS3"),
+        write_settings);
 }

 void BackupWriterS3::removeFile(const String & file_name)
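Note: fileContentsEqual() disappears from both writers in this commit while a protected readFile() hook appears, presumably so the shared BackupWriterDefault base can implement the comparison once on top of that hook; the removed bodies did exactly such a read-and-compare. Below is a sketch of how such a generic implementation could look; WriterBase is a hypothetical stand-in, not the real base class.

// Sketch only: fileContentsEqual() expressed once in terms of fileExists() and readFile().
#include <cstddef>
#include <istream>
#include <memory>
#include <string>

struct WriterBase
{
    virtual ~WriterBase() = default;
    virtual bool fileExists(const std::string & file_name) = 0;
    virtual std::unique_ptr<std::istream> readFile(const std::string & file_name, size_t expected_size) = 0;

    bool fileContentsEqual(const std::string & file_name, const std::string & expected)
    {
        if (!fileExists(file_name))
            return false;
        auto in = readFile(file_name, expected.size());
        std::string actual(expected.size(), ' ');
        in->read(actual.data(), static_cast<std::streamsize>(actual.size()));
        /// Equal only if exactly the expected bytes were read, they match, and nothing is left over.
        return static_cast<size_t>(in->gcount()) == expected.size()
            && actual == expected
            && in->peek() == std::char_traits<char>::eof();
    }
};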
@@ -3,8 +3,8 @@
 #include "config.h"

 #if USE_AWS_S3
-#include <Backups/BackupIO.h>
-#include <IO/ReadSettings.h>
+#include <Backups/BackupIO_Default.h>
+#include <Disks/DiskType.h>
 #include <IO/S3Common.h>
 #include <Storages/StorageS3Settings.h>
 #include <Interpreters/Context_fwd.h>
@@ -14,7 +14,7 @@ namespace DB
 {

 /// Represents a backup stored to AWS S3.
-class BackupReaderS3 : public IBackupReader
+class BackupReaderS3 : public BackupReaderDefault
 {
 public:
     BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
@@ -23,20 +23,19 @@ public:
     bool fileExists(const String & file_name) override;
     UInt64 getFileSize(const String & file_name) override;
     std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
-    void copyFileToDisk(const String & file_name, size_t size, DiskPtr destination_disk, const String & destination_path,
-                        WriteMode write_mode, const WriteSettings & write_settings) override;
-    DataSourceDescription getDataSourceDescription() const override;
+    void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                        DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;

 private:
-    S3::URI s3_uri;
-    std::shared_ptr<S3::Client> client;
-    ReadSettings read_settings;
+    const S3::URI s3_uri;
+    const std::shared_ptr<S3::Client> client;
     S3Settings::RequestSettings request_settings;
-    Poco::Logger * log;
+    const DataSourceDescription data_source_description;
 };


-class BackupWriterS3 : public IBackupWriter
+class BackupWriterS3 : public BackupWriterDefault
 {
 public:
     BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
@@ -44,42 +43,24 @@ public:

     bool fileExists(const String & file_name) override;
     UInt64 getFileSize(const String & file_name) override;
-    bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
     std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;

-    void copyDataToFile(const CreateReadBufferFunction & create_read_buffer, UInt64 offset, UInt64 size, const String & dest_file_name) override;
+    void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) override;
+    void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                          bool copy_encrypted, UInt64 start_pos, UInt64 length) override;

     void removeFile(const String & file_name) override;
     void removeFiles(const Strings & file_names) override;

-    DataSourceDescription getDataSourceDescription() const override;
-    bool supportNativeCopy(DataSourceDescription data_source_description) const override;
-    void copyFileNative(DiskPtr src_disk, const String & src_file_name, UInt64 src_offset, UInt64 src_size, const String & dest_file_name) override;
-
 private:
-    void copyObjectImpl(
-        const String & src_bucket,
-        const String & src_key,
-        const String & dst_bucket,
-        const String & dst_key,
-        size_t size,
-        const std::optional<ObjectAttributes> & metadata = std::nullopt) const;
-
-    void copyObjectMultipartImpl(
-        const String & src_bucket,
-        const String & src_key,
-        const String & dst_bucket,
-        const String & dst_key,
-        size_t size,
-        const std::optional<ObjectAttributes> & metadata = std::nullopt) const;
+    std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;

     void removeFilesBatch(const Strings & file_names);

-    S3::URI s3_uri;
-    std::shared_ptr<S3::Client> client;
+    const S3::URI s3_uri;
+    const std::shared_ptr<S3::Client> client;
     S3Settings::RequestSettings request_settings;
-    Poco::Logger * log;
     std::optional<bool> supports_batch_delete;
+    const DataSourceDescription data_source_description;
 };

 }
@@ -36,6 +36,7 @@ namespace ErrorCodes
     extern const int WRONG_BASE_BACKUP;
     extern const int BACKUP_ENTRY_NOT_FOUND;
     extern const int BACKUP_IS_EMPTY;
+    extern const int CANNOT_RESTORE_TO_NONENCRYPTED_DISK;
     extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
     extern const int LOGICAL_ERROR;
 }
@@ -339,6 +340,8 @@ void BackupImpl::writeBackupMetadata()
             }
             if (!info.data_file_name.empty() && (info.data_file_name != info.file_name))
                 *out << "<data_file>" << xml << info.data_file_name << "</data_file>";
+            if (info.encrypted_by_disk)
+                *out << "<encrypted_by_disk>true</encrypted_by_disk>";
         }

         total_size += info.size;
@@ -444,6 +447,7 @@ void BackupImpl::readBackupMetadata()
             {
                 info.data_file_name = getString(file_config, "data_file", info.file_name);
             }
+            info.encrypted_by_disk = getBool(file_config, "encrypted_by_disk", false);
         }

         file_names.emplace(info.file_name, std::pair{info.size, info.checksum});
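Note: the new <encrypted_by_disk> element above is written only when the flag is true and read back with a false default, so older backup metadata without the element keeps working unchanged. A tiny sketch of that round-trip, using a plain map in place of the real XML configuration object:

// Sketch only: conditional write / defaulted read of an optional boolean metadata field.
#include <map>
#include <string>

struct FileInfoSketch
{
    std::string data_file_name;
    bool encrypted_by_disk = false;
};

void writeMeta(std::map<std::string, std::string> & out, const FileInfoSketch & info)
{
    if (info.encrypted_by_disk)
        out["encrypted_by_disk"] = "true";   // omitted entirely when false
}

void readMeta(const std::map<std::string, std::string> & in, FileInfoSketch & info)
{
    auto it = in.find("encrypted_by_disk");
    info.encrypted_by_disk = (it != in.end() && it->second == "true");   // absent => false
}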
@@ -633,6 +637,11 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const String & file_nam
 }

 std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum & size_and_checksum) const
+{
+    return readFileImpl(size_and_checksum, /* read_encrypted= */ false);
+}
+
+std::unique_ptr<SeekableReadBuffer> BackupImpl::readFileImpl(const SizeAndChecksum & size_and_checksum, bool read_encrypted) const
 {
     if (open_mode != OpenMode::READ)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for reading");
@@ -660,6 +669,14 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum &
         info = it->second;
     }

+    if (info.encrypted_by_disk != read_encrypted)
+    {
+        throw Exception(
+            ErrorCodes::CANNOT_RESTORE_TO_NONENCRYPTED_DISK,
+            "File {} is encrypted in the backup, it can be restored only to an encrypted disk",
+            info.data_file_name);
+    }
+
     std::unique_ptr<SeekableReadBuffer> read_buffer;
     std::unique_ptr<SeekableReadBuffer> base_read_buffer;

@@ -720,14 +737,14 @@ std::unique_ptr<SeekableReadBuffer> BackupImpl::readFile(const SizeAndChecksum &
     }
 }

-size_t BackupImpl::copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
-                                  WriteMode write_mode, const WriteSettings & write_settings) const
+size_t BackupImpl::copyFileToDisk(const String & file_name,
+                                  DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const
 {
-    return copyFileToDisk(getFileSizeAndChecksum(file_name), destination_disk, destination_path, write_mode, write_settings);
+    return copyFileToDisk(getFileSizeAndChecksum(file_name), destination_disk, destination_path, write_mode);
 }

-size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
-                                  WriteMode write_mode, const WriteSettings & write_settings) const
+size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum,
+                                  DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const
 {
     if (open_mode != OpenMode::READ)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for reading");
@@ -760,19 +777,26 @@ size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, Dis
         info = it->second;
     }

+    if (info.encrypted_by_disk && !destination_disk->getDataSourceDescription().is_encrypted)
+    {
+        throw Exception(
+            ErrorCodes::CANNOT_RESTORE_TO_NONENCRYPTED_DISK,
+            "File {} is encrypted in the backup, it can be restored only to an encrypted disk",
+            info.data_file_name);
+    }
+
     bool file_copied = false;

     if (info.size && !info.base_size && !use_archive)
     {
         /// Data comes completely from this backup.
-        reader->copyFileToDisk(info.data_file_name, info.size, destination_disk, destination_path, write_mode, write_settings);
+        reader->copyFileToDisk(info.data_file_name, info.size, info.encrypted_by_disk, destination_disk, destination_path, write_mode);
         file_copied = true;

     }
     else if (info.size && (info.size == info.base_size))
     {
         /// Data comes completely from the base backup (nothing comes from this backup).
-        base_backup->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode, write_settings);
+        base_backup->copyFileToDisk(std::pair{info.base_size, info.base_checksum}, destination_disk, destination_path, write_mode);
         file_copied = true;
     }

@@ -786,9 +810,13 @@ size_t BackupImpl::copyFileToDisk(const SizeAndChecksum & size_and_checksum, Dis
     else
     {
         /// Use the generic way to copy data. `readFile()` will update `num_read_files`.
-        auto read_buffer = readFile(size_and_checksum);
-        auto write_buffer = destination_disk->writeFile(destination_path, std::min<size_t>(info.size, DBMS_DEFAULT_BUFFER_SIZE),
-                                                        write_mode, write_settings);
+        auto read_buffer = readFileImpl(size_and_checksum, /* read_encrypted= */ info.encrypted_by_disk);
+        std::unique_ptr<WriteBuffer> write_buffer;
+        size_t buf_size = std::min<size_t>(info.size, reader->getWriteBufferSize());
+        if (info.encrypted_by_disk)
+            write_buffer = destination_disk->writeEncryptedFile(destination_path, buf_size, write_mode, reader->getWriteSettings());
+        else
+            write_buffer = destination_disk->writeFile(destination_path, buf_size, write_mode, reader->getWriteSettings());
         copyData(*read_buffer, *write_buffer, info.size);
         write_buffer->finalize();
     }
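Note: in the buffered fallback above, the write side now depends on whether the backup holds raw bytes taken from an encrypted disk: such bytes are assumed to go through writeEncryptedFile() so they land beneath the encryption layer unchanged, while ordinary entries keep using writeFile(). A rough standalone sketch of that branch follows; IDiskLike and copyThroughBuffers are hypothetical stand-ins, not the real IDisk interface.

// Sketch only: choosing the encrypted or plain write path in a buffered copy.
#include <algorithm>
#include <cstddef>
#include <istream>
#include <memory>
#include <ostream>
#include <string>

struct IDiskLike
{
    virtual ~IDiskLike() = default;
    virtual std::unique_ptr<std::ostream> writeFile(const std::string & path, size_t buf_size) = 0;
    /// Assumed to exist only on encrypted disks: writes bytes that are already encrypted, as-is.
    virtual std::unique_ptr<std::ostream> writeEncryptedFile(const std::string & path, size_t buf_size) = 0;
};

void copyThroughBuffers(std::istream & in, IDiskLike & disk, const std::string & path,
                        size_t file_size, bool encrypted_in_backup)
{
    const size_t default_buffer_size = 1048576;  // stand-in for DBMS_DEFAULT_BUFFER_SIZE
    size_t buf_size = std::min(file_size, default_buffer_size);

    /// An entry that was stored encrypted must not be encrypted a second time on restore.
    auto out = encrypted_in_backup ? disk.writeEncryptedFile(path, buf_size)
                                   : disk.writeFile(path, buf_size);

    std::string chunk(buf_size, '\0');
    size_t left = file_size;
    while (left != 0)
    {
        size_t to_read = std::min(left, buf_size);
        in.read(chunk.data(), static_cast<std::streamsize>(to_read));
        size_t got = static_cast<size_t>(in.gcount());
        if (got == 0)
            break;  // source ended early
        out->write(chunk.data(), static_cast<std::streamsize>(got));
        left -= got;
    }
    out->flush();
}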
@@ -805,72 +833,57 @@ void BackupImpl::writeFile(const BackupFileInfo & info, BackupEntryPtr entry)
     if (writing_finalized)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is already finalized");

-    std::string from_file_name = "memory buffer";
-    if (auto fname = entry->getFilePath(); !fname.empty())
-        from_file_name = "file " + fname;
+    bool should_check_lock_file = false;

     {
         std::lock_guard lock{mutex};
         ++num_files;
         total_size += info.size;
+        if (!num_entries)
+            should_check_lock_file = true;
     }

+    auto src_disk = entry->getDisk();
+    auto src_file_path = entry->getFilePath();
+    bool from_immutable_file = entry->isFromImmutableFile();
+    String src_file_desc = src_file_path.empty() ? "memory buffer" : ("file " + src_file_path);
+
     if (info.data_file_name.empty())
     {
-        LOG_TRACE(log, "Writing backup for file {} from {}: skipped, {}", info.data_file_name, from_file_name, !info.size ? "empty" : "base backup has it");
+        LOG_TRACE(log, "Writing backup for file {} from {}: skipped, {}", info.data_file_name, src_file_desc, !info.size ? "empty" : "base backup has it");
         return;
     }

     if (!coordination->startWritingFile(info.data_file_index))
     {
-        LOG_TRACE(log, "Writing backup for file {} from {}: skipped, data file #{} is already being written", info.data_file_name, from_file_name, info.data_file_index);
+        LOG_TRACE(log, "Writing backup for file {} from {}: skipped, data file #{} is already being written", info.data_file_name, src_file_desc, info.data_file_index);
         return;
     }

-    LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, from_file_name, info.data_file_index);
-
-    auto writer_description = writer->getDataSourceDescription();
-    auto reader_description = entry->getDataSourceDescription();
-
-    /// We need to copy whole file without archive, we can do it faster
-    /// if source and destination are compatible
-    if (!use_archive && writer->supportNativeCopy(reader_description))
+    if (!should_check_lock_file)
+        checkLockFile(true);
+
+    /// NOTE: `mutex` must be unlocked during copying otherwise writing will be in one thread maximum and hence slow.
+
+    if (use_archive)
     {
-        /// Should be much faster than writing data through server.
-        LOG_TRACE(log, "Will copy file {} using native copy", info.data_file_name);
-
-        /// NOTE: `mutex` must be unlocked here otherwise writing will be in one thread maximum and hence slow.
-        writer->copyFileNative(entry->tryGetDiskIfExists(), entry->getFilePath(), info.base_size, info.size - info.base_size, info.data_file_name);
+        LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}, adding to archive", info.data_file_name, src_file_desc, info.data_file_index);
+        auto out = archive_writer->writeFile(info.data_file_name);
+        auto read_buffer = entry->getReadBuffer(writer->getReadSettings());
+        if (info.base_size != 0)
+            read_buffer->seek(info.base_size, SEEK_SET);
+        copyData(*read_buffer, *out);
+        out->finalize();
+    }
+    else if (src_disk && from_immutable_file)
+    {
+        LOG_TRACE(log, "Writing backup for file {} from {} (disk {}): data file #{}", info.data_file_name, src_file_desc, src_disk->getName(), info.data_file_index);
+        writer->copyFileFromDisk(info.data_file_name, src_disk, src_file_path, info.encrypted_by_disk, info.base_size, info.size - info.base_size);
     }
     else
     {
-        bool has_entries = false;
-        {
-            std::lock_guard lock{mutex};
-            has_entries = num_entries > 0;
-        }
-        if (!has_entries)
-            checkLockFile(true);
-
-        if (use_archive)
-        {
-            LOG_TRACE(log, "Adding file {} to archive", info.data_file_name);
-            auto out = archive_writer->writeFile(info.data_file_name);
-            auto read_buffer = entry->getReadBuffer();
-            if (info.base_size != 0)
-                read_buffer->seek(info.base_size, SEEK_SET);
-            copyData(*read_buffer, *out);
-            out->finalize();
-        }
-        else
-        {
-            LOG_TRACE(log, "Will copy file {}", info.data_file_name);
-            auto create_read_buffer = [entry] { return entry->getReadBuffer(); };
-
-            /// NOTE: `mutex` must be unlocked here otherwise writing will be in one thread maximum and hence slow.
-            writer->copyDataToFile(create_read_buffer, info.base_size, info.size - info.base_size, info.data_file_name);
-        }
+        LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, src_file_desc, info.data_file_index);
+        auto create_read_buffer = [entry, read_settings = writer->getReadSettings()] { return entry->getReadBuffer(read_settings); };
+        writer->copyDataToFile(info.data_file_name, create_read_buffer, info.base_size, info.size - info.base_size);
     }

     {
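Note: writeFile() above now dispatches three ways: archived backups always stream the entry and seek past the part already covered by the base backup; entries backed by an immutable file on a disk go through copyFileFromDisk(), which may use a native copy; everything else goes through copyDataToFile() with a read-buffer factory. A simplified sketch of that dispatch follows, using hypothetical stand-in types and log prints in place of the real copy calls.

// Sketch only: the backup-entry write dispatch, reduced to plain standard C++.
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct EntrySketch
{
    bool from_immutable_file = false;                       // backed by a file that won't change during the backup
    std::string src_disk;                                   // empty when the entry is not on a disk
    std::string src_path;
    std::function<std::unique_ptr<std::istream>()> open;    // read-buffer factory
};

void writeEntrySketch(bool use_archive, const EntrySketch & entry, const std::string & data_file,
                      bool encrypted_by_disk, uint64_t base_size, uint64_t size)
{
    if (use_archive)
    {
        /// Archives are always written by streaming: open the entry and skip the prefix
        /// that the base backup already holds (the real code seeks to info.base_size).
        auto in = entry.open();
        in->seekg(static_cast<std::streamoff>(base_size));
        std::cout << "append " << (size - base_size) << " bytes of " << data_file << " to the archive\n";
    }
    else if (!entry.src_disk.empty() && entry.from_immutable_file)
    {
        /// The writer may copy straight from the source disk, possibly with a native copy.
        std::cout << "copyFileFromDisk(" << data_file << ", disk=" << entry.src_disk
                  << ", encrypted=" << encrypted_by_disk << ")\n";
    }
    else
    {
        /// Generic path: hand the writer a factory so it can open the entry lazily,
        /// keeping the backup mutex unlocked while the data is copied.
        std::cout << "copyDataToFile(" << data_file << ", offset=" << base_size << ")\n";
    }
}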
@@ -76,10 +76,8 @@ public:
     SizeAndChecksum getFileSizeAndChecksum(const String & file_name) const override;
     std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) const override;
     std::unique_ptr<SeekableReadBuffer> readFile(const SizeAndChecksum & size_and_checksum) const override;
-    size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
-                          WriteMode write_mode, const WriteSettings & write_settings) const override;
-    size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
-                          WriteMode write_mode, const WriteSettings & write_settings) const override;
+    size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const override;
+    size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) const override;
     void writeFile(const BackupFileInfo & info, BackupEntryPtr entry) override;
     void finalizeWriting() override;
     bool supportsWritingInMultipleThreads() const override { return !use_archive; }
@@ -109,6 +107,8 @@ private:
     /// Calculates and sets `compressed_size`.
     void setCompressedSize();

+    std::unique_ptr<SeekableReadBuffer> readFileImpl(const SizeAndChecksum & size_and_checksum, bool read_encrypted) const;
+
     const String backup_name_for_logging;
     const bool use_archive;
     const ArchiveParams archive_params;
@@ -23,6 +23,7 @@ namespace ErrorCodes
     M(String, password) \
     M(Bool, structure_only) \
     M(Bool, async) \
+    M(Bool, decrypt_files_from_encrypted_disks) \
     M(Bool, deduplicate_files) \
     M(UInt64, shard_num) \
     M(UInt64, replica_num) \
@@ -32,6 +32,9 @@ struct BackupSettings
     /// Whether the BACKUP command must return immediately without waiting until the backup has completed.
     bool async = false;

+    /// Whether the BACKUP command should decrypt files stored on encrypted disks.
+    bool decrypt_files_from_encrypted_disks = false;
+
     /// Whether the BACKUP will omit similar files (within one backup only).
     bool deduplicate_files = true;

@@ -368,6 +368,7 @@ void BackupsWorker::doBackup(

             /// Wait until all the hosts have written their backup entries.
             backup_coordination->waitForStage(Stage::COMPLETED);
+            backup_coordination->setStage(Stage::COMPLETED,"");
         }
         else
         {
@@ -385,7 +386,7 @@ void BackupsWorker::doBackup(
             writeBackupEntries(backup, std::move(backup_entries), backup_id, backup_coordination, backup_settings.internal);

             /// We have written our backup entries, we need to tell other hosts (they could be waiting for it).
-            backup_coordination->setStage(Stage::COMPLETED, "");
+            backup_coordination->setStage(Stage::COMPLETED,"");
         }

         size_t num_files = 0;
@@ -654,12 +655,26 @@ void BackupsWorker::doRestore(
     /// (If this isn't ON CLUSTER query RestorerFromBackup will check access rights later.)
     ClusterPtr cluster;
     bool on_cluster = !restore_query->cluster.empty();

     if (on_cluster)
     {
         restore_query->cluster = context->getMacros()->expand(restore_query->cluster);
         cluster = context->getCluster(restore_query->cluster);
         restore_settings.cluster_host_ids = cluster->getHostIDs();
+    }
+
+    /// Make a restore coordination.
+    if (!restore_coordination)
+        restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster);
+
+    if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores)))
+        throw Exception(
+            ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED,
+            "Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");
+
+    if (on_cluster)
+    {
         /// We cannot just use access checking provided by the function executeDDLQueryOnCluster(): it would be incorrect
         /// because different replicas can contain different set of tables and so the required access rights can differ too.
         /// So the right way is pass through the entire cluster and check access for each host.
@@ -676,15 +691,6 @@ void BackupsWorker::doRestore(
         }
     }

-    /// Make a restore coordination.
-    if (!restore_coordination)
-        restore_coordination = makeRestoreCoordination(context, restore_settings, /* remote= */ on_cluster);
-
-    if (!allow_concurrent_restores && restore_coordination->hasConcurrentRestores(std::ref(num_active_restores)))
-        throw Exception(
-            ErrorCodes::CONCURRENT_ACCESS_NOT_SUPPORTED,
-            "Concurrent restores not supported, turn on setting 'allow_concurrent_restores'");
-
     /// Do RESTORE.
     if (on_cluster)
     {
@@ -703,6 +709,7 @@ void BackupsWorker::doRestore(

         /// Wait until all the hosts have written their backup entries.
         restore_coordination->waitForStage(Stage::COMPLETED);
+        restore_coordination->setStage(Stage::COMPLETED,"");
     }
     else
     {
@@ -109,10 +109,10 @@ public:

     /// Copies a file from the backup to a specified destination disk. Returns the number of bytes written.
     virtual size_t copyFileToDisk(const String & file_name, DiskPtr destination_disk, const String & destination_path,
-                                  WriteMode write_mode = WriteMode::Rewrite, const WriteSettings & write_settings = {}) const = 0;
+                                  WriteMode write_mode = WriteMode::Rewrite) const = 0;

     virtual size_t copyFileToDisk(const SizeAndChecksum & size_and_checksum, DiskPtr destination_disk, const String & destination_path,
-                                  WriteMode write_mode = WriteMode::Rewrite, const WriteSettings & write_settings = {}) const = 0;
+                                  WriteMode write_mode = WriteMode::Rewrite) const = 0;

     /// Puts a new entry to the backup.
     virtual void writeFile(const BackupFileInfo & file_info, BackupEntryPtr entry) = 0;
@@ -17,23 +17,16 @@ class IBackupEntriesLazyBatch::BackupEntryFromBatch : public IBackupEntry
 public:
     BackupEntryFromBatch(const std::shared_ptr<IBackupEntriesLazyBatch> & batch_, size_t index_) : batch(batch_), index(index_) { }

+    std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const override { return getInternalBackupEntry()->getReadBuffer(read_settings); }
     UInt64 getSize() const override { return getInternalBackupEntry()->getSize(); }
-    std::optional<UInt128> getChecksum() const override { return getInternalBackupEntry()->getChecksum(); }
-    std::unique_ptr<SeekableReadBuffer> getReadBuffer() const override { return getInternalBackupEntry()->getReadBuffer(); }
-    String getFilePath() const override
-    {
-        return getInternalBackupEntry()->getFilePath();
-    }
-
-    DiskPtr tryGetDiskIfExists() const override
-    {
-        return getInternalBackupEntry()->tryGetDiskIfExists();
-    }
-
-    DataSourceDescription getDataSourceDescription() const override
-    {
-        return getInternalBackupEntry()->getDataSourceDescription();
-    }
+    UInt128 getChecksum() const override { return getInternalBackupEntry()->getChecksum(); }
+    std::optional<UInt128> getPartialChecksum(size_t prefix_length) const override { return getInternalBackupEntry()->getPartialChecksum(prefix_length); }
+    DataSourceDescription getDataSourceDescription() const override { return getInternalBackupEntry()->getDataSourceDescription(); }
+    bool isEncryptedByDisk() const override { return getInternalBackupEntry()->isEncryptedByDisk(); }
+    bool isFromFile() const override { return getInternalBackupEntry()->isFromFile(); }
+    bool isFromImmutableFile() const override { return getInternalBackupEntry()->isFromImmutableFile(); }
+    String getFilePath() const override { return getInternalBackupEntry()->getFilePath(); }
+    DiskPtr getDisk() const override { return getInternalBackupEntry()->getDisk(); }

 private:
     BackupEntryPtr getInternalBackupEntry() const
@@ -20,16 +20,24 @@ public:
     /// Returns the size of the data.
     virtual UInt64 getSize() const = 0;
 
-    /// Returns the checksum of the data if it's precalculated.
-    /// Can return nullopt which means the checksum should be calculated from the read buffer.
-    virtual std::optional<UInt128> getChecksum() const { return {}; }
+    /// Returns the checksum of the data.
+    virtual UInt128 getChecksum() const = 0;
+
+    /// Returns a partial checksum, i.e. the checksum calculated for a prefix part of the data.
+    /// Can return nullopt if the partial checksum is too difficult to calculate.
+    virtual std::optional<UInt128> getPartialChecksum(size_t /* prefix_length */) const { return {}; }
 
     /// Returns a read buffer for reading the data.
-    virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer() const = 0;
+    virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer(const ReadSettings & read_settings) const = 0;
 
-    virtual String getFilePath() const = 0;
+    /// Returns true if the data returned by getReadBuffer() is encrypted by an encrypted disk.
+    virtual bool isEncryptedByDisk() const { return false; }
 
-    virtual DiskPtr tryGetDiskIfExists() const = 0;
+    /// Returns information about disk and file if this backup entry is generated from a file.
+    virtual bool isFromFile() const { return false; }
+    virtual bool isFromImmutableFile() const { return false; }
+    virtual String getFilePath() const { return ""; }
+    virtual DiskPtr getDisk() const { return nullptr; }
 
     virtual DataSourceDescription getDataSourceDescription() const = 0;
 };
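Illustrative sketch (not from the ClickHouse sources): the getPartialChecksum() contract added above is a checksum over only the first prefix_length bytes, with nullopt as an escape hatch when that would be expensive. A minimal stand-in using only standard-library types; the class name and the use of std::hash are assumptions for illustration.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>

/// Toy stand-in for a backup entry kept fully in memory: the prefix checksum is
/// cheap to compute here, so it is returned; an entry that would have to re-read
/// and re-hash remote data could return std::nullopt instead.
struct InMemoryEntrySketch
{
    std::string data;

    uint64_t getChecksum() const { return std::hash<std::string>{}(data); }

    std::optional<uint64_t> getPartialChecksum(size_t prefix_length) const
    {
        if (prefix_length >= data.size())
            return getChecksum();
        return std::hash<std::string>{}(data.substr(0, prefix_length));
    }
};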
@@ -93,7 +93,10 @@ void RestoreCoordinationRemote::createRootNodes()
 
 void RestoreCoordinationRemote::setStage(const String & new_stage, const String & message)
 {
-    stage_sync->set(current_host, new_stage, message);
+    if (is_internal)
+        stage_sync->set(current_host, new_stage, message);
+    else
+        stage_sync->set(current_host, new_stage, /* message */ "", /* all_hosts */ true);
 }
 
 void RestoreCoordinationRemote::setError(const Exception & exception)
@@ -283,8 +286,8 @@ bool RestoreCoordinationRemote::hasConcurrentRestores(const std::atomic<size_t>
         String status;
         if (zk->tryGet(root_zookeeper_path + "/" + existing_restore_path + "/stage", status))
         {
-            /// If status is not COMPLETED it could be because the restore failed, check if 'error' exists
-            if (status != Stage::COMPLETED && !zk->exists(root_zookeeper_path + "/" + existing_restore_path + "/error"))
+            /// Check if some other restore is in progress
+            if (status == Stage::SCHEDULED_TO_START)
             {
                 LOG_WARNING(log, "Found a concurrent restore: {}, current restore: {}", existing_restore_uuid, toString(restore_uuid));
                 result = true;
@@ -169,9 +169,9 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
         {
             std::shared_ptr<IBackupReader> reader;
             if (engine_name == "File")
-                reader = std::make_shared<BackupReaderFile>(path);
+                reader = std::make_shared<BackupReaderFile>(path, params.context);
             else
-                reader = std::make_shared<BackupReaderDisk>(disk, path);
+                reader = std::make_shared<BackupReaderDisk>(disk, path, params.context);
             return std::make_unique<BackupImpl>(backup_name_for_logging, archive_params, params.base_backup_info, reader, params.context);
         }
         else
@@ -528,7 +528,7 @@ target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::fast_float)
 
 if (USE_ORC)
     dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
-    dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} "${CMAKE_BINARY_DIR}/contrib/orc/c++/include")
+    dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} "${PROJECT_BINARY_DIR}/contrib/orc/c++/include")
 endif ()
 
 if (TARGET ch_contrib::rocksdb)
@@ -1246,6 +1246,14 @@ void ClientBase::setInsertionTable(const ASTInsertQuery & insert_query)
 }
 
+
+void ClientBase::addMultiquery(std::string_view query, Arguments & common_arguments) const
+{
+    common_arguments.emplace_back("--multiquery");
+    common_arguments.emplace_back("-q");
+    common_arguments.emplace_back(query);
+}
+
 
 void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query)
 {
     auto query = query_to_execute;
@@ -2592,15 +2600,19 @@ void ClientBase::init(int argc, char ** argv)
         ("version-clean", "print version in machine-readable format and exit")
 
         ("config-file,C", po::value<std::string>(), "config-file path")
-        ("queries-file", po::value<std::vector<std::string>>()->multitoken(),
-            "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)")
-        ("database,d", po::value<std::string>(), "database")
-        ("history_file", po::value<std::string>(), "path to history file")
+
         ("query,q", po::value<std::string>(), "query")
-        ("stage", po::value<std::string>()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit")
+        ("queries-file", po::value<std::vector<std::string>>()->multitoken(),
+            "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)")
+        ("multiquery,n", "If specified, multiple queries separated by semicolons can be listed after --query. For convenience, it is also possible to omit --query and pass the queries directly after --multiquery.")
+        ("multiline,m", "If specified, allow multiline queries (do not send the query on Enter)")
+        ("database,d", po::value<std::string>(), "database")
         ("query_kind", po::value<std::string>()->default_value("initial_query"), "One of initial_query/secondary_query/no_query")
         ("query_id", po::value<std::string>(), "query_id")
 
+        ("history_file", po::value<std::string>(), "path to history file")
+
+        ("stage", po::value<std::string>()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit")
         ("progress", po::value<ProgressOption>()->implicit_value(ProgressOption::TTY, "tty")->default_value(ProgressOption::DEFAULT, "default"), "Print progress of queries execution - to TTY: tty|on|1|true|yes; to STDERR non-interactive mode: err; OFF: off|0|false|no; DEFAULT - interactive to TTY, non-interactive is off")
 
         ("disable_suggestion,A", "Disable loading suggestion data. Note that suggestion data is loaded asynchronously through a second connection to ClickHouse server. Also it is reasonable to disable suggestion if you want to paste a query with TAB characters. Shorthand option -A is for those who get used to mysql client.")
@@ -2612,9 +2624,6 @@ void ClientBase::init(int argc, char ** argv)
         ("log-level", po::value<std::string>(), "log level")
         ("server_logs_file", po::value<std::string>(), "put server logs into specified file")
 
-        ("multiline,m", "multiline")
-        ("multiquery,n", "multiquery")
-
         ("suggestion_limit", po::value<int>()->default_value(10000),
             "Suggestion limit for how many databases, tables and columns to fetch.")
 
@@ -129,6 +129,7 @@ protected:
 
     void setInsertionTable(const ASTInsertQuery & insert_query);
 
+    void addMultiquery(std::string_view query, Arguments & common_arguments) const;
 
 private:
     void receiveResult(ASTPtr parsed_query, Int32 signals_before_stop, bool partial_result_on_first_cancel);
@@ -4,6 +4,8 @@
 
 namespace ProfileEvents
 {
+    extern const Event DistributedConnectionTries;
+    extern const Event DistributedConnectionUsable;
     extern const Event DistributedConnectionMissingTable;
     extern const Event DistributedConnectionStaleReplica;
 }
@@ -35,6 +37,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
     SCOPE_EXIT(is_finished = true);
     try
     {
+        ProfileEvents::increment(ProfileEvents::DistributedConnectionTries);
         result.entry = pool->get(*timeouts, settings, /* force_connected = */ false);
         AsyncCallbackSetter async_setter(&*result.entry, std::move(async_callback));
 
@@ -45,6 +48,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
         if (!table_to_check || server_revision < DBMS_MIN_REVISION_WITH_TABLES_STATUS)
         {
             result.entry->forceConnected(*timeouts);
+            ProfileEvents::increment(ProfileEvents::DistributedConnectionUsable);
             result.is_usable = true;
             result.is_up_to_date = true;
             return;
@@ -65,6 +69,7 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
             return;
         }
 
+        ProfileEvents::increment(ProfileEvents::DistributedConnectionUsable);
         result.is_usable = true;
 
         UInt64 max_allowed_delay = settings ? UInt64(settings->max_replica_delay_for_distributed_queries) : 0;
@@ -135,7 +135,6 @@ private:
     Protocol::Compression compression; /// Whether to compress data when interacting with the server.
     Protocol::Secure secure; /// Whether to encrypt data when interacting with the server.
     Int64 priority; /// priority from <remote_servers>
-
 };
 
 /**
@@ -192,6 +191,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP
 {
     return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port
         && lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password
+        && lhs.quota_key == rhs.quota_key
         && lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name
         && lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority;
 }
@@ -73,9 +73,9 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
 
 Int64 ConnectionPoolWithFailover::getPriority() const
 {
-    return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto &a, const auto &b)
+    return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
     {
-        return a->getPriority() - b->getPriority();
+        return a->getPriority() < b->getPriority();
     }))->getPriority();
 }
 
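Illustrative sketch (not from the ClickHouse sources): std::max_element expects a comparator that returns true when its first argument orders before the second. The old lambda returned the difference a - b, which converts to true for any non-zero value and is not a strict weak ordering, so the selected pool was effectively arbitrary. A standalone check of the corrected form, using plain ints instead of connection pools:

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    std::vector<int> priorities{3, 1, 2};

    /// A strict "less than" predicate, as in the fixed lambda above.
    int max_priority = *std::max_element(priorities.begin(), priorities.end(),
                                         [](int a, int b) { return a < b; });
    assert(max_priority == 3);

    /// With "return a - b;" the predicate would be true for any unequal pair,
    /// violating the strict-weak-ordering requirement, so the result of
    /// std::max_element would be unspecified.
    return 0;
}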
@@ -10,6 +10,8 @@
 #include <type_traits>
 
 
+#define DATE_SECONDS_PER_DAY 86400 /// Number of seconds in a day, 60 * 60 * 24
+
 #define DATE_LUT_MIN_YEAR 1900 /// 1900 since majority of financial organizations consider 1900 as an initial year.
 #define DATE_LUT_MAX_YEAR 2299 /// Last supported year (complete)
 #define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table
@@ -579,6 +579,7 @@
     M(694, ASYNC_LOAD_CYCLE) \
     M(695, ASYNC_LOAD_FAILED) \
     M(696, ASYNC_LOAD_CANCELED) \
+    M(697, CANNOT_RESTORE_TO_NONENCRYPTED_DISK) \
     \
     M(999, KEEPER_EXCEPTION) \
     M(1000, POCO_EXCEPTION) \
@@ -10,6 +10,10 @@
   * Instead of this class, you could just use the pair (version, key) in the HashSet as the key
   * but then the table would accumulate all the keys that it ever stored, and it was unreasonably growing.
   * This class goes a step further and considers the keys with the old version empty in the hash table.
+  *
+  * Zero values note:
+  * A cell in ClearableHashSet can store a zero values as normal value
+  * If its version is equal to the version of the set itself, then it's not considered as empty even key's value is zero value of the corresponding type
   */
 
 
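Illustrative sketch (not the real ClearableHashSet): the note above relies on a per-cell version deciding emptiness, so clearing the set is a single counter increment instead of wiping the buffer. The version trick in isolation, with made-up names:

#include <cassert>
#include <cstddef>
#include <vector>

/// A cell is "occupied" only while its version matches the container's version;
/// clear() bumps the container version, making every cell logically empty in O(1).
struct VersionedSlots
{
    struct Cell { int value = 0; size_t version = 0; };

    std::vector<Cell> cells;
    size_t version = 1;

    explicit VersionedSlots(size_t n) : cells(n) {}

    void set(size_t i, int value) { cells[i] = {value, version}; }
    bool occupied(size_t i) const { return cells[i].version == version; }
    void clear() { ++version; }
};

int main()
{
    VersionedSlots s(4);
    s.set(1, 42);
    assert(s.occupied(1));
    s.clear();
    assert(!s.occupied(1));   /// The old-version cell now reads as empty.
    return 0;
}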
@@ -48,30 +52,6 @@ struct ClearableHashTableCell : public BaseCell
     ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
 };
 
-using StringRefBaseCell = HashSetCellWithSavedHash<StringRef, DefaultHash<StringRef>, ClearableHashSetState>;
-
-/// specialization for StringRef to allow zero size key (empty string)
-template <>
-struct ClearableHashTableCell<StringRef, StringRefBaseCell> : public StringRefBaseCell
-{
-    using State = ClearableHashSetState;
-    using value_type = typename StringRefBaseCell::value_type;
-
-    UInt32 version;
-
-    bool isZero(const State & state) const { return version != state.version; }
-    static bool isZero(const StringRef & key_, const State & state_) { return StringRefBaseCell::isZero(key_, state_); }
-
-    /// Set the key value to zero.
-    void setZero() { version = 0; }
-
-    /// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
-    static constexpr bool need_zero_value_storage = true;
-
-    ClearableHashTableCell() { } /// NOLINT
-    ClearableHashTableCell(const StringRef & key_, const State & state) : StringRefBaseCell(key_, state), version(state.version) { }
-};
-
 template <
     typename Key,
     typename Hash = DefaultHash<Key>,
@@ -90,13 +70,6 @@ public:
     {
         ++this->version;
         this->m_size = 0;
-
-        if constexpr (Cell::need_zero_value_storage)
-        {
-            /// clear ZeroValueStorage
-            if (this->hasZero())
-                this->clearHasZero();
-        }
     }
 };
 
@@ -119,13 +92,6 @@ public:
     {
         ++this->version;
         this->m_size = 0;
-
-        if constexpr (Cell::need_zero_value_storage)
-        {
-            /// clear ZeroValueStorage
-            if (this->hasZero())
-                this->clearHasZero();
-        }
     }
 };
 
@@ -358,7 +358,7 @@ public:
         std::pair<LookupResult, bool> res;
         emplace(Cell::getKey(x), res.first, res.second);
         if (res.second)
-            insertSetMapped(res.first->getMapped(), x);
+            res.first->setMapped(x);
 
         return res;
     }
@@ -9,6 +9,8 @@
 /** NOTE HashMap could only be used for memmoveable (position independent) types.
   * Example: std::string is not position independent in libstdc++ with C++11 ABI or in libc++.
   * Also, key in hash table must be of type, that zero bytes is compared equals to zero key.
+  *
+  * Please keep in sync with PackedHashMap.h
   */
 
 namespace DB
@@ -53,13 +55,13 @@ PairNoInit<std::decay_t<First>, std::decay_t<Second>> makePairNoInit(First && fi
 }
 
 
-template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState>
+template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState, typename Pair = PairNoInit<Key, TMapped>>
 struct HashMapCell
 {
     using Mapped = TMapped;
     using State = TState;
 
-    using value_type = PairNoInit<Key, Mapped>;
+    using value_type = Pair;
     using mapped_type = Mapped;
     using key_type = Key;
 
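Illustrative sketch (not from the ClickHouse sources): the new Pair parameter lets HashMapCell store its key/mapped pair with a non-default layout, which is what the "keep in sync with PackedHashMap.h" note above is about. The benefit of substituting a packed pair is purely layout; the types below are invented for the sketch and use a GCC/Clang-specific attribute.

#include <cstdint>
#include <cstdio>

template <typename First, typename Second>
struct PlainPair { First first; Second second; };

/// Packed variant: no padding between the key and the mapped value.
template <typename First, typename Second>
struct __attribute__((packed)) PackedPair { First first; Second second; };

int main()
{
    /// For an 8-byte key and a 1-byte value the plain pair is padded to 16 bytes,
    /// while the packed one occupies 9 - the memory/alignment trade-off a packed
    /// hash map cell makes.
    std::printf("%zu %zu\n", sizeof(PlainPair<uint64_t, uint8_t>), sizeof(PackedPair<uint64_t, uint8_t>));
    return 0;
}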
@@ -151,14 +153,14 @@ struct HashMapCell
 namespace std
 {
 
-template <typename Key, typename TMapped, typename Hash, typename TState>
-struct tuple_size<HashMapCell<Key, TMapped, Hash, TState>> : std::integral_constant<size_t, 2> { };
+template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
+struct tuple_size<HashMapCell<Key, TMapped, Hash, TState, Pair>> : std::integral_constant<size_t, 2> { };
 
-template <typename Key, typename TMapped, typename Hash, typename TState>
-struct tuple_element<0, HashMapCell<Key, TMapped, Hash, TState>> { using type = Key; };
+template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
+struct tuple_element<0, HashMapCell<Key, TMapped, Hash, TState, Pair>> { using type = Key; };
 
-template <typename Key, typename TMapped, typename Hash, typename TState>
-struct tuple_element<1, HashMapCell<Key, TMapped, Hash, TState>> { using type = TMapped; };
+template <typename Key, typename TMapped, typename Hash, typename TState, typename Pair>
+struct tuple_element<1, HashMapCell<Key, TMapped, Hash, TState, Pair>> { using type = TMapped; };
 }
 
 template <typename Key, typename TMapped, typename Hash, typename TState = HashTableNoState>
@@ -41,6 +41,8 @@ public:
     using Base = HashTable<Key, TCell, Hash, Grower, Allocator>;
     using typename Base::LookupResult;
 
+    using Base::Base;
+
     void merge(const Self & rhs)
     {
         if (!this->hasZero() && rhs.hasZero())
@@ -117,7 +117,7 @@ inline bool bitEquals(T && a, T && b)
   * 3) Hash tables that store the key and do not have a "mapped" value, e.g. the normal HashTable.
   *    GetKey returns the key, and GetMapped returns a zero void pointer. This simplifies generic
   *    code that works with mapped values: it can overload on the return type of GetMapped(), and
-  *    doesn't need other parameters. One example is insertSetMapped() function.
+  *    doesn't need other parameters. One example is Cell::setMapped() function.
   *
   * 4) Hash tables that store both the key and the "mapped" value, e.g. HashMap. Both GetKey and
   *    GetMapped are supported.
@@ -216,17 +216,6 @@ struct HashTableCell
 
 };
 
-/**
-  * A helper function for HashTable::insert() to set the "mapped" value.
-  * Overloaded on the mapped type, does nothing if it's VoidMapped.
-  */
-template <typename ValueType>
-void insertSetMapped(VoidMapped /* dest */, const ValueType & /* src */) {}
-
-template <typename MappedType, typename ValueType>
-void insertSetMapped(MappedType & dest, const ValueType & src) { dest = src.second; }
-
-
 /** Determines the size of the hash table, and when and how much it should be resized.
   * Has very small state (one UInt8) and useful for Set-s allocated in automatic memory (see uniqExact as an example).
   */
@@ -241,6 +230,8 @@ struct HashTableGrower
     /// If collision resolution chains are contiguous, we can implement erase operation by moving the elements.
     static constexpr auto performs_linear_probing_with_single_step = true;
 
+    static constexpr size_t max_size_degree = 23;
+
     /// The size of the hash table in the cells.
     size_t bufSize() const { return 1ULL << size_degree; }
 
@@ -259,17 +250,18 @@ struct HashTableGrower
     /// Increase the size of the hash table.
     void increaseSize()
    {
-        size_degree += size_degree >= 23 ? 1 : 2;
+        size_degree += size_degree >= max_size_degree ? 1 : 2;
     }
 
     /// Set the buffer size by the number of elements in the hash table. Used when deserializing a hash table.
     void set(size_t num_elems)
     {
-        size_degree = num_elems <= 1
-            ? initial_size_degree
-            : ((initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
-                ? initial_size_degree
-                : (static_cast<size_t>(log2(num_elems - 1)) + 2));
+        if (num_elems <= 1)
+            size_degree = initial_size_degree;
+        else if (initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
+            size_degree = initial_size_degree;
+        else
+            size_degree = static_cast<size_t>(log2(num_elems - 1)) + 2;
     }
 
     void setBufSize(size_t buf_size_)
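Illustrative sketch (not from the ClickHouse sources): both the old ternary chain and the new if/else in set() compute, for num_elems > 1, max(initial_size_degree, floor(log2(num_elems - 1)) + 2), i.e. a power-of-two buffer roughly twice the element count. A standalone check of that reading; the function name and the fixed initial degree of 8 are local to this sketch.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>

/// Mirrors the if/else chain in set() for one initial degree.
static size_t sizeDegreeFor(size_t num_elems, size_t initial_size_degree = 8)
{
    if (num_elems <= 1)
        return initial_size_degree;
    return std::max(initial_size_degree, static_cast<size_t>(log2(num_elems - 1)) + 2);
}

int main()
{
    assert(sizeDegreeFor(1) == 8);                   /// small tables keep the initial degree
    assert(sizeDegreeFor(1000) == 11);               /// floor(log2(999)) = 9, plus 2
    assert((1ULL << sizeDegreeFor(1000)) == 2048);   /// 2048 cells for 1000 elements (~0.5 load)
    return 0;
}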
@@ -281,6 +273,7 @@ struct HashTableGrower
 /** Determines the size of the hash table, and when and how much it should be resized.
   * This structure is aligned to cache line boundary and also occupies it all.
   * Precalculates some values to speed up lookups and insertion into the HashTable (and thus has bigger memory footprint than HashTableGrower).
+  * This grower assume 0.5 load factor
   */
 template <size_t initial_size_degree = 8>
 class alignas(64) HashTableGrowerWithPrecalculation
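Illustrative sketch (not from the ClickHouse sources): the 0.5 load factor mentioned above follows from the member initializers in the next hunk, where the resize threshold is 1ULL << (size_degree - 1), exactly half of bufSize() = 1ULL << size_degree. A small compile-time check of that relation, with names local to the sketch:

#include <cstddef>

int main()
{
    constexpr size_t initial_size_degree = 8;
    constexpr size_t buf_size = 1ULL << initial_size_degree;         /// 256 cells
    constexpr size_t max_fill = 1ULL << (initial_size_degree - 1);   /// grow once 128 cells are used
    static_assert(max_fill * 2 == buf_size, "resize is triggered at half occupancy");
    return 0;
}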
@@ -290,6 +283,7 @@ class alignas(64) HashTableGrowerWithPrecalculation
     UInt8 size_degree = initial_size_degree;
     size_t precalculated_mask = (1ULL << initial_size_degree) - 1;
     size_t precalculated_max_fill = 1ULL << (initial_size_degree - 1);
+    static constexpr size_t max_size_degree = 23;
 
 public:
     UInt8 sizeDegree() const { return size_degree; }
@@ -319,16 +313,17 @@ public:
     bool overflow(size_t elems) const { return elems > precalculated_max_fill; }
 
     /// Increase the size of the hash table.
-    void increaseSize() { increaseSizeDegree(size_degree >= 23 ? 1 : 2); }
+    void increaseSize() { increaseSizeDegree(size_degree >= max_size_degree ? 1 : 2); }
 
     /// Set the buffer size by the number of elements in the hash table. Used when deserializing a hash table.
     void set(size_t num_elems)
     {
-        size_degree = num_elems <= 1
-            ? initial_size_degree
-            : ((initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
-                ? initial_size_degree
-                : (static_cast<size_t>(log2(num_elems - 1)) + 2));
+        if (num_elems <= 1)
+            size_degree = initial_size_degree;
+        else if (initial_size_degree > static_cast<size_t>(log2(num_elems - 1)) + 2)
+            size_degree = initial_size_degree;
+        else
+            size_degree = static_cast<size_t>(log2(num_elems - 1)) + 2;
         increaseSizeDegree(0);
     }
 
@@ -753,6 +748,7 @@ protected:
 
 public:
     using key_type = Key;
+    using grower_type = Grower;
     using mapped_type = typename Cell::mapped_type;
     using value_type = typename Cell::value_type;
     using cell_type = Cell;
@@ -770,6 +766,14 @@ public:
         alloc(grower);
     }
 
+    explicit HashTable(const Grower & grower_)
+        : grower(grower_)
+    {
+        if (Cell::need_zero_value_storage)
+            this->zeroValue()->setZero();
+        alloc(grower);
+    }
+
     HashTable(size_t reserve_for_num_elements) /// NOLINT
     {
         if (Cell::need_zero_value_storage)
@@ -1037,7 +1041,7 @@ public:
         }
 
         if (res.second)
-            insertSetMapped(res.first->getMapped(), x);
+            res.first->setMapped(x);
 
         return res;
     }
Some files were not shown because too many files have changed in this diff.