Merge branch 'master' into stack_trace_in_part_log

Alexander Tokmakov 2023-02-03 20:05:00 +03:00 committed by GitHub
commit 3f11948bb0
300 changed files with 5608 additions and 2777 deletions


@ -2813,6 +2813,217 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
# Parallel replicas
FunctionalStatefulTestDebugParallelReplicas:
needs: [BuilderDebDebug]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (debug, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsanParallelReplicas:
needs: [BuilderDebUBsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_ubsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (ubsan, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsanParallelReplicas:
needs: [BuilderDebMsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_msan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (msan, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsanParallelReplicas:
needs: [BuilderDebTsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_tsan
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (tsan, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsanParallelReplicas:
needs: [BuilderDebAsan]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (asan, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestReleaseParallelReplicas:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (release, ParallelReplicas)
REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
KILL_TIMEOUT=3600
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
##############################################################################################

.gitmodules vendored (3 changes)

@ -257,6 +257,9 @@
[submodule "contrib/qpl"]
path = contrib/qpl
url = https://github.com/intel/qpl
[submodule "contrib/idxd-config"]
path = contrib/idxd-config
url = https://github.com/intel/idxd-config
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash

contrib/aws vendored (2 changes)

@ -1 +1 @@
Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252
Subproject commit 06a6610e6fb3385e22ad85014a67aa307825ffb1

contrib/azure vendored (2 changes)

@ -1 +1 @@
Subproject commit ea8c3044f43f5afa7016d2d580ed201f495d7e94
Subproject commit e4fcdfc81e337e589ce231a452dcc280fcbb3f99

contrib/idxd-config vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit f6605c41a735e3fdfef2d2d18655a33af6490b99

contrib/qpl vendored (2 changes)

@ -1 +1 @@
Subproject commit becb7a1b15bdb4845ec3721a550707ffa51d029d
Subproject commit d75a29d95d8a548297fce3549d21020005364dc8


@ -10,11 +10,30 @@ if (NOT ENABLE_QPL)
return()
endif()
## QPL has build dependency on libaccel-config. Here is to build libaccel-config which is required by QPL.
## libaccel-config is the utility library for controlling and configuring Intel® In-Memory Analytics Accelerator (Intel® IAA).
set (LIBACCEL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/idxd-config")
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
set (LIBACCEL_HEADER_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake/idxd-header")
set (SRCS
"${LIBACCEL_SOURCE_DIR}/accfg/lib/libaccfg.c"
"${LIBACCEL_SOURCE_DIR}/util/log.c"
"${LIBACCEL_SOURCE_DIR}/util/sysfs.c"
)
add_library(accel-config ${SRCS})
target_compile_options(accel-config PRIVATE "-D_GNU_SOURCE")
target_include_directories(accel-config BEFORE
PRIVATE ${UUID_DIR}
PRIVATE ${LIBACCEL_HEADER_DIR}
PRIVATE ${LIBACCEL_SOURCE_DIR})
## QPL build start here.
set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl")
set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources")
set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl")
set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake")
set (EFFICIENT_WAIT OFF)
set (BLOCK_ON_FAULT ON)
set (LOG_HW_INIT OFF)
@ -315,13 +334,8 @@ target_compile_definitions(_qpl
PRIVATE -DQPL_BADARG_CHECK
PUBLIC -DENABLE_QPL_COMPRESSION)
find_library(LIBACCEL accel-config)
if(NOT LIBACCEL)
message(FATAL_ERROR "Please install QPL dependency library:libaccel-config from https://github.com/intel/idxd-config")
endif()
target_link_libraries(_qpl
PRIVATE ${LIBACCEL}
PRIVATE accel-config
PRIVATE ${CMAKE_DL_LIBS})
add_library (ch_contrib::qpl ALIAS _qpl)
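The `ch_contrib::qpl` target built here backs ClickHouse's QPL-based DEFLATE codec; below is a usage sketch under the assumption that the codec is exposed to SQL as `DEFLATE_QPL` (the codec name does not appear anywhere in this diff, so treat it as an assumption).

```sql
-- Assumes a server built with ENABLE_QPL and a column codec named DEFLATE_QPL.
CREATE TABLE qpl_codec_demo
(
    id      UInt64,
    payload String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree
ORDER BY id;
```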


@ -0,0 +1,159 @@
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */
/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */
/* Debug messages. */
/* #undef ENABLE_DEBUG */
/* Documentation / man pages. */
/* #define ENABLE_DOCS */
/* System logging. */
#define ENABLE_LOGGING 1
/* accfg test support */
/* #undef ENABLE_TEST */
/* Define to 1 if big-endian-arch */
/* #undef HAVE_BIG_ENDIAN */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* Define to 1 if you have the <linux/version.h> header file. */
#define HAVE_LINUX_VERSION_H 1
/* Define to 1 if little-endian-arch */
#define HAVE_LITTLE_ENDIAN 1
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* Define to 1 if you have the `secure_getenv' function. */
#define HAVE_SECURE_GETENV 1
/* Define to 1 if you have statement expressions. */
#define HAVE_STATEMENT_EXPR 1
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define to 1 if typeof works with your compiler. */
#define HAVE_TYPEOF 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if using libuuid */
#define HAVE_UUID 1
/* Define to 1 if you have the `__secure_getenv' function. */
/* #undef HAVE___SECURE_GETENV */
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* Name of package */
#define PACKAGE "accel-config"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "linux-dsa@lists.01.org"
/* Define to the full name of this package. */
#define PACKAGE_NAME "accel-config"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "accel-config 3.5.2.gitf6605c41"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "accel-config"
/* Define to the home page for this package. */
#define PACKAGE_URL "https://github.com/xxx/accel-config"
/* Define to the version of this package. */
#define PACKAGE_VERSION "3.5.2.gitf6605c41"
/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1
/* Enable extensions on AIX 3, Interix. */
#ifndef _ALL_SOURCE
# define _ALL_SOURCE 1
#endif
/* Enable GNU extensions on systems that have them. */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif
/* Enable threading extensions on Solaris. */
#ifndef _POSIX_PTHREAD_SEMANTICS
# define _POSIX_PTHREAD_SEMANTICS 1
#endif
/* Enable extensions on HP NonStop. */
#ifndef _TANDEM_SOURCE
# define _TANDEM_SOURCE 1
#endif
/* Enable general extensions on Solaris. */
#ifndef __EXTENSIONS__
# define __EXTENSIONS__ 1
#endif
/* Version number of package */
#define VERSION "3.5.2.gitf6605c41"
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
# define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* # undef WORDS_BIGENDIAN */
# endif
#endif
/* Enable large inode numbers on Mac OS X 10.5. */
#ifndef _DARWIN_USE_64_BIT_INODE
# define _DARWIN_USE_64_BIT_INODE 1
#endif
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */
/* Define for large files, on AIX-style hosts. */
/* #undef _LARGE_FILES */
/* Define to 1 if on MINIX. */
/* #undef _MINIX */
/* Define to 2 if the system does not provide POSIX.1 features except with
this defined. */
/* #undef _POSIX_1_SOURCE */
/* Define to 1 if you need to in order for `stat' and other things to work. */
/* #undef _POSIX_SOURCE */
/* Define to __typeof__ if your compiler spells it that way. */
/* #undef typeof */


@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.1.2.9"
ARG VERSION="23.1.3.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.


@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.1.2.9"
ARG VERSION="23.1.3.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image


@ -126,13 +126,16 @@ function run_tests()
fi
set +e
clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
--skip 00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" \
if [[ -n "$USE_PARALLEL_REPLICAS" ]] && [[ "$USE_PARALLEL_REPLICAS" -eq 1 ]]; then
clickhouse-test --client="clickhouse-client --use_hedged_requests=0 --allow_experimental_parallel_reading_from_replicas=1 \
--max_parallel_replicas=100 --cluster_for_parallel_replicas='parallel_replicas'" \
-j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --no-parallel-replicas --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
clickhouse-test --timeout 1200 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time \
00168_parallel_processing_on_replicas "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
else
clickhouse-test -j 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
fi
set -e
}


@ -134,9 +134,9 @@ function run_tests()
set +e
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
set -e
}


@ -13,15 +13,28 @@ sysctl kernel.core_pattern='core.%e.%p-%P'
OK="\tOK\t\\N\t"
FAIL="\tFAIL\t\\N\t"
FAILURE_CONTEXT_LINES=50
FAILURE_CONTEXT_MAX_LINE_WIDTH=400
function escaped()
{
# That's the simplest way I found to escape a string in bash. Yep, bash is the most convenient programming language.
clickhouse local -S 's String' --input-format=LineAsString -q "select * from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'"
# Also limit lines width just in case (too long lines are not really useful usually)
clickhouse local -S 's String' --input-format=LineAsString -q "select substr(s, 1, $FAILURE_CONTEXT_MAX_LINE_WIDTH)
from table format CustomSeparated settings format_custom_row_after_delimiter='\\\\\\\\n'"
}
function head_escaped()
{
head -50 $1 | escaped
head -n $FAILURE_CONTEXT_LINES $1 | escaped
}
function unts()
{
grep -Po "[0-9][0-9]:[0-9][0-9] \K.*"
}
function trim_server_logs()
{
head -n $FAILURE_CONTEXT_LINES "/test_output/$1" | grep -Eo " \[ [0-9]+ \] \{.*" | escaped
}
function install_packages()
@ -167,7 +180,7 @@ function start()
then
echo "Cannot start clickhouse-server"
rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt ||:
echo -e "Cannot start clickhouse-server$FAIL$(head_escaped /test_output/application_errors.txt)" >> /test_output/test_results.tsv
echo -e "Cannot start clickhouse-server$FAIL$(trim_server_logs application_errors.txt)" >> /test_output/test_results.tsv
cat /var/log/clickhouse-server/stdout.log
tail -n100 /var/log/clickhouse-server/stderr.log
tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n100
@ -389,7 +402,7 @@ start
# NOTE Hung check is implemented in docker/tests/stress/stress
rg -Fa "No queries hung" /test_output/test_results.tsv | grep -Fa "OK" \
|| echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log)"
|| echo -e "Hung check failed, possible deadlock found (see hung_check.log)$FAIL$(head_escaped /test_output/hung_check.log | unts)"
stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.stress.log
@ -402,7 +415,7 @@ start
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
&& echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(head_escaped /test_output/application_errors.txt)" \
&& echo -e "Server failed to start (see application_errors.txt and clickhouse-server.clean.log)$FAIL$(trim_server_logs application_errors.txt)" \
>> /test_output/test_results.tsv)
stop
@ -435,7 +448,7 @@ rg -Fa "Code: 49. DB::Exception: " /var/log/clickhouse-server/clickhouse-server*
# No such key errors
rg --text "Code: 499.*The specified key does not exist" /var/log/clickhouse-server/clickhouse-server*.log > /test_output/no_such_key_errors.txt \
&& echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(head_escaped /test_output/no_such_key_errors.txt)" >> /test_output/test_results.tsv \
&& echo -e "S3_ERROR No such key thrown (see clickhouse-server.log or no_such_key_errors.txt)$FAIL$(trim_server_logs no_such_key_errors.txt)" >> /test_output/test_results.tsv \
|| echo -e "No lost s3 keys$OK" >> /test_output/test_results.tsv
# Remove file no_such_key_errors.txt if it's empty
@ -448,7 +461,7 @@ rg -Fa "########################################" /var/log/clickhouse-server/cli
# It also checks for crash without stacktrace (printed by watchdog)
rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_output/fatal_messages.txt \
&& echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(head_escaped /test_output/fatal_messages.txt)" >> /test_output/test_results.tsv \
&& echo -e "Fatal message in clickhouse-server.log (see fatal_messages.txt)$FAIL$(trim_server_logs fatal_messages.txt)" >> /test_output/test_results.tsv \
|| echo -e "No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
# Remove file fatal_messages.txt if it's empty
@ -457,8 +470,13 @@ rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server*.log > /test_out
rg -Fa "########################################" /test_output/* > /dev/null \
&& echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv
function get_gdb_log_context()
{
rg -A50 -Fa " received signal " /test_output/gdb.log | head_escaped
}
rg -Fa " received signal " /test_output/gdb.log > /dev/null \
&& echo -e "Found signal in gdb.log$FAIL$(rg -A50 -Fa " received signal " /test_output/gdb.log | escaped)" >> /test_output/test_results.tsv
&& echo -e "Found signal in gdb.log$FAIL$(get_gdb_log_context)" >> /test_output/test_results.tsv
if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
echo -e "Backward compatibility check\n"
@ -579,7 +597,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
start 500
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt \
&& echo -e "Backward compatibility check: Server failed to start$FAIL$(head_escaped /test_output/bc_check_application_errors.txt)" >> /test_output/test_results.tsv)
&& echo -e "Backward compatibility check: Server failed to start$FAIL$(trim_server_logs bc_check_application_errors.txt)" >> /test_output/test_results.tsv)
clickhouse-client --query="SELECT 'Server version: ', version()"
@ -634,7 +652,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
-e "Session expired" \
-e "TOO_MANY_PARTS" \
/var/log/clickhouse-server/clickhouse-server.backward.dirty.log | rg -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
&& echo -e "Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)$FAIL$(head_escaped /test_output/bc_check_error_messages.txt)" \
&& echo -e "Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)$FAIL$(trim_server_logs bc_check_error_messages.txt)" \
>> /test_output/test_results.tsv \
|| echo -e "Backward compatibility check: No Error messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv
@ -657,7 +675,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
# Logical errors
echo "Check for Logical errors in server log:"
rg -Fa -A20 "Code: 49. DB::Exception:" /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_logical_errors.txt \
&& echo -e "Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)$FAIL$(head_escaped /test_output/bc_check_logical_errors.txt)" \
&& echo -e "Backward compatibility check: Logical error thrown (see clickhouse-server.log or bc_check_logical_errors.txt)$FAIL$(trim_server_logs bc_check_logical_errors.txt)" \
>> /test_output/test_results.tsv \
|| echo -e "Backward compatibility check: No logical errors$OK" >> /test_output/test_results.tsv
@ -672,7 +690,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
# It also checks for crash without stacktrace (printed by watchdog)
echo "Check for Fatal message in server log:"
rg -Fa " <Fatal> " /var/log/clickhouse-server/clickhouse-server.backward.*.log > /test_output/bc_check_fatal_messages.txt \
&& echo -e "Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)$FAIL$(head_escaped /test_output/bc_check_fatal_messages.txt)" \
&& echo -e "Backward compatibility check: Fatal message in clickhouse-server.log (see bc_check_fatal_messages.txt)$FAIL$(trim_server_logs bc_check_fatal_messages.txt)" \
>> /test_output/test_results.tsv \
|| echo -e "Backward compatibility check: No fatal messages in clickhouse-server.log$OK" >> /test_output/test_results.tsv


@ -85,8 +85,16 @@ def process_test_log(log_path):
if DATABASE_SIGN in line:
test_end = True
# Python does not support TSV, so we have to escape '\t' and '\n' manually
# and hope that complex escape sequences will not break anything
test_results = [
(test[0], test[1], test[2], "".join(test[3])[:4096]) for test in test_results
(
test[0],
test[1],
test[2],
"".join(test[3])[:4096].replace("\t", "\\t").replace("\n", "\\n"),
)
for test in test_results
]
return (


@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.1.3.5-stable (548b494bcce) FIXME as compared to v23.1.2.9-stable (8dfb1700858)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#45896](https://github.com/ClickHouse/ClickHouse/issues/45896): Bugfix IPv6 parser for mixed ip4 address with missed first octet (like `::.1.2.3`). [#45871](https://github.com/ClickHouse/ClickHouse/pull/45871) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Get rid of progress timestamps in release publishing [#45818](https://github.com/ClickHouse/ClickHouse/pull/45818) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@ -16,6 +16,11 @@ Tests are located in `queries` directory. There are two subdirectories: `statele
Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`.
:::note
A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case, time zones in CI test runs
are deliberately randomized. The easiest workaround is to specify the time zone for test values explicitly, e.g. `toDateTime64(val, 3, 'Europe/Amsterdam')`.
:::
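A minimal illustration of the workaround described in the note; the literal value is arbitrary.

```sql
-- Pin the time zone explicitly so the expected output does not depend on the
-- randomized server time zone used in CI runs.
SELECT toDateTime64('2023-02-03 20:05:00', 3, 'Europe/Amsterdam') AS t;
```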
### Running a Test Locally {#functional-test-locally}
Start the ClickHouse server locally, listening on the default port (9000). To


@ -77,9 +77,12 @@ Optional parameters:
- `rabbitmq_password` - RabbitMQ password.
- `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
- `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default : `1`.
- `rabbitmq_empty_queue_backoff_start` — A start backoff point to reschedule read if the rabbitmq queue is empty.
- `rabbitmq_empty_queue_backoff_end` — An end backoff point to reschedule read if the rabbitmq queue is empty.
SSL connection:
Use either `rabbitmq_secure = 1` or `amqps` in connection address: `rabbitmq_address = 'amqps://guest:guest@localhost/vhost'`.
The default behaviour of the used library is not to check if the created TLS connection is sufficiently secure. Whether the certificate is expired, self-signed, missing or invalid: the connection is simply permitted. More strict checking of certificates can possibly be implemented in the future.
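A hedged sketch of a table definition using some of the settings above; `rabbitmq_exchange_name` and `rabbitmq_format` are required by the engine but do not appear in this excerpt, so treat their values as assumptions.

```sql
CREATE TABLE rabbitmq_events
(
    key   UInt64,
    value String
)
ENGINE = RabbitMQ
SETTINGS
    -- an amqps:// address (or rabbitmq_secure = 1) enables the TLS connection described above
    rabbitmq_address = 'amqps://guest:guest@localhost/vhost',
    rabbitmq_exchange_name = 'events',        -- assumed required setting
    rabbitmq_format = 'JSONEachRow',          -- assumed required setting
    rabbitmq_max_rows_per_message = 1;
```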


@ -86,9 +86,9 @@ CREATE TABLE hackernews (
author String,
timestamp DateTime,
comment String,
dead UInt8,
parent UInt64,
poll UInt64,
dead UInt8,
parent UInt64,
poll UInt64,
children Array(UInt32),
url String,
score UInt32,
@ -171,9 +171,23 @@ WHERE hasToken(lower(comment), 'clickhouse')
1 row in set. Elapsed: 0.747 sec. Processed 4.49 million rows, 1.77 GB (6.01 million rows/s., 2.37 GB/s.)
```
We can also search for one or all of multiple terms, i.e., disjunctions or conjunctions:
```sql
-- multiple OR'ed terms
SELECT count(*)
FROM hackernews
WHERE multiSearchAny(lower(comment), ['oltp', 'olap']);
-- multiple AND'ed terms
SELECT count(*)
FROM hackernews
WHERE hasToken(lower(comment), 'avx') AND hasToken(lower(comment), 'sve');
```
:::note
Unlike other secondary indices, inverted indexes (for now) map to row numbers (row ids) instead of granule ids. The reason for this design
is performance. In practice, users often search for multiple terms at once. For example, filter predicate `WHERE s LIKE '%little%' OR s LIKE
'%big%'` can be evaluated directly using an inverted index by forming the union of the row id lists for terms "little" and "big". This also
means that the parameter `GRANULARITY` supplied to index creation has no meaning (it may be removed from the syntax in the future).
:::
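A sketch of how such an index could be attached to the `hackernews` table above; the experimental-feature setting name and the `inverted(0)` tokenizer argument are assumptions not shown in this excerpt, and `GRANULARITY` appears only to illustrate the note that it currently has no effect.

```sql
SET allow_experimental_inverted_index = 1;  -- setting name is an assumption

ALTER TABLE hackernews
    ADD INDEX comment_lowercase(lower(comment)) TYPE inverted(0) GRANULARITY 1;

-- Build the index for existing parts as well.
ALTER TABLE hackernews MATERIALIZE INDEX comment_lowercase;
```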


@ -2,15 +2,16 @@
slug: /en/sql-reference/statements/delete
sidebar_position: 36
sidebar_label: DELETE
description: Lightweight deletes simplify the process of deleting data from the database.
keywords: [delete]
title: DELETE Statement
---
# DELETE Statement
``` sql
DELETE FROM [db.]table [ON CLUSTER cluster] [WHERE expr]
```
`DELETE FROM` removes rows from table `[db.]table` that match expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of data happens asynchronously in background. This feature is only available for MergeTree table engine family.
`DELETE FROM` removes rows from the table `[db.]table` that match the expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of data happens asynchronously in the background. This feature is only available for the MergeTree table engine family.
For example, the following query deletes all rows from the `hits` table where the `Title` column contains the text `hello`:
@ -32,7 +33,7 @@ SET allow_experimental_lightweight_delete = true;
An [alternative way to delete rows](./alter/delete.md) in ClickHouse is `ALTER TABLE ... DELETE`, which might be more efficient if you do bulk deletes only occasionally and don't need the operation to be applied instantly. In most use cases the new lightweight `DELETE FROM` behavior will be considerably faster.
:::warning
Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Ligthweight deletes are currently efficient for wide parts, but for compact parts they can be a heavyweight operation, and it may be better to use `ALTER TABLE` for some scenarios.
Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Lightweight deletes are currently efficient for wide parts, but for compact parts, they can be a heavyweight operation, and it may be better to use `ALTER TABLE` for some scenarios.
:::
:::note
@ -41,3 +42,34 @@ Even though deletes are becoming more lightweight in ClickHouse, they should sti
grant ALTER DELETE ON db.table to username;
```
:::
## Lightweight Delete Internals
The idea behind Lightweight Delete is that when a `DELETE FROM table ...` query is executed ClickHouse only saves a mask where each row is marked as either “existing” or as “deleted”. Those “deleted” rows become invisible for subsequent queries, but physically the rows are removed only later by subsequent merges. Writing this mask is usually much more lightweight than what is done by `ALTER table DELETE ...` query.
### How it is implemented
The mask is implemented as a hidden `_row_exists` system column that stores True for all visible rows and False for deleted ones. This column is only present in a part if some rows in this part were deleted. In other words, the column is not persisted when it has all values equal to True.
## SELECT query
When the column is present `SELECT ... FROM table WHERE condition` query internally is extended by an additional predicate on `_row_exists` and becomes similar to
```sql
SELECT ... FROM table PREWHERE _row_exists WHERE condition
```
At execution time the column `_row_exists` is read to figure out which rows are not visible and if there are many deleted rows it can figure out which granules can be fully skipped when reading the rest of the columns.
## DELETE query
`DELETE FROM table WHERE condition` is translated into `ALTER table UPDATE _row_exists = 0 WHERE condition` mutation. Internally this mutation is executed in 2 steps:
1. `SELECT count() FROM table WHERE condition` for each individual part to figure out if the part is affected.
2. Mutate affected parts, and make hardlinks for unaffected parts. Mutating a part in fact only writes `_row_exists` column and just hardlinks all other columns files in the case of Wide parts. But for Compact parts, all columns are rewritten because they all are stored together in one file.
So if we compare Lightweight Delete to `ALTER DELETE` in the first step they both do the same thing to figure out which parts are affected, but in the second step `ALTER DELETE` does much more work because it reads and rewrites all columns files for the affected parts.
With the described implementation now we can see what can negatively affect 'DELETE FROM' execution time:
- Heavy WHERE condition in DELETE query
- Mutations queue filled with other mutations, because all mutations on a table are executed sequentially
- Table having a very large number of data parts
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.
:::note
This implementation might change in the future.
:::
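A worked illustration of the translation described above, reusing the `hits`/`Title` example from earlier on this page; the second statement is what the server effectively runs internally, not something a user would normally write.

```sql
-- Lightweight delete issued by the user:
DELETE FROM hits WHERE Title LIKE '%hello%';

-- Internally translated into a mutation that only writes the _row_exists mask:
ALTER TABLE hits UPDATE _row_exists = 0 WHERE Title LIKE '%hello%';
```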


@ -474,7 +474,7 @@ private:
executor.sendQuery(ClientInfo::QueryKind::INITIAL_QUERY);
ProfileInfo info;
while (Block block = executor.read())
while (Block block = executor.readBlock())
info.update(block);
executor.finish();


@ -2040,7 +2040,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(
while (true)
{
auto block = remote_query_executor->read();
auto block = remote_query_executor->readBlock();
if (!block)
break;
}


@ -19,6 +19,9 @@ target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
if (TARGET ch_rust::skim)
target_link_libraries(clickhouse-local-lib PRIVATE ch_rust::skim)
endif()
if (TARGET ch_contrib::azure_sdk)
target_link_libraries(clickhouse-local-lib PRIVATE ch_contrib::azure_sdk)
endif()
# Always use internal readpassphrase
target_link_libraries(clickhouse-local-lib PRIVATE readpassphrase)


@ -51,6 +51,10 @@
#include <Functions/getFuzzerData.h>
#endif
#if USE_AZURE_BLOB_STORAGE
# include <azure/storage/common/internal/xml_wrapper.hpp>
#endif
namespace fs = std::filesystem;
@ -115,6 +119,14 @@ void LocalServer::initialize(Poco::Util::Application & self)
config().getUInt("thread_pool_queue_size", 10000)
);
#if USE_AZURE_BLOB_STORAGE
/// See the explanation near the same line in Server.cpp
GlobalThreadPool::instance().addOnDestroyCallback([]
{
Azure::Storage::_internal::XmlGlobalDeinitialize();
});
#endif
IOThreadPool::initialize(
config().getUInt("max_io_thread_pool_size", 100),
config().getUInt("max_io_thread_pool_free_size", 0),


@ -27,6 +27,9 @@ set (CLICKHOUSE_SERVER_LINK
if (TARGET ch_contrib::jemalloc)
list(APPEND CLICKHOUSE_SERVER_LINK PRIVATE ch_contrib::jemalloc)
endif()
if (TARGET ch_contrib::azure_sdk)
list(APPEND CLICKHOUSE_SERVER_LINK PRIVATE ch_contrib::azure_sdk)
endif()
clickhouse_program_add(server)


@ -128,6 +128,10 @@
# include <jemalloc/jemalloc.h>
#endif
#if USE_AZURE_BLOB_STORAGE
# include <azure/storage/common/internal/xml_wrapper.hpp>
#endif
namespace CurrentMetrics
{
extern const Metric Revision;
@ -750,6 +754,19 @@ try
config().getUInt("max_thread_pool_free_size", 1000),
config().getUInt("thread_pool_queue_size", 10000));
#if USE_AZURE_BLOB_STORAGE
/// It makes sense to deinitialize libxml after joining of all threads
/// in global pool because libxml uses thread-local memory allocations via
/// 'pthread_key_create' and 'pthread_setspecific' which should be deallocated
/// at 'pthread_exit'. Deinitialization of libxml leads to call of 'pthread_key_delete'
/// and if it is done before joining of threads, allocated memory will not be freed
/// and there may be memory leaks in threads that used libxml.
GlobalThreadPool::instance().addOnDestroyCallback([]
{
Azure::Storage::_internal::XmlGlobalDeinitialize();
});
#endif
IOThreadPool::initialize(
config().getUInt("max_io_thread_pool_size", 100),
config().getUInt("max_io_thread_pool_free_size", 0),


@ -854,6 +854,51 @@
</replica>
</shard-->
</test_cluster_one_shard_three_replicas_localhost>
<parallel_replicas>
<shard>
<internal_replication>false</internal_replication>
<replica>
<host>127.0.0.1</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.2</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.3</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.4</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.5</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.6</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.7</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.8</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.9</host>
<port>9000</port>
</replica>
<replica>
<host>127.0.0.10</host>
<port>9000</port>
</replica>
</shard>
</parallel_replicas>
<test_cluster_two_shards_localhost>
<shard>
<replica>


@ -11,11 +11,26 @@ mod ffi {
struct Item {
text: String,
orig_text: String,
}
impl Item {
fn new(text: String) -> Self {
return Self{
// Text that will be shown should not contains new lines since in this case skim may
// live some symbols on the screen, and this looks odd.
text: text.replace("\n", " "),
orig_text: text,
};
}
}
impl SkimItem for Item {
fn text(&self) -> Cow<str> {
return Cow::Borrowed(&self.text);
}
fn output(&self) -> Cow<str> {
return Cow::Borrowed(&self.orig_text);
}
}
fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, String> {
@ -34,7 +49,7 @@ fn skim(prefix: &CxxString, words: &CxxVector<CxxString>) -> Result<String, Stri
let (tx, rx): (SkimItemSender, SkimItemReceiver) = unbounded();
for word in words {
tx.send(Arc::new(Item{ text: word.to_string() })).unwrap();
tx.send(Arc::new(Item::new(word.to_string()))).unwrap();
}
// so that skim could know when to stop waiting for more items.
drop(tx);


@ -146,6 +146,7 @@ enum class AccessType
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_S3_CLIENT_CACHE, "SYSTEM DROP S3 CLIENT, DROP S3 CLIENT CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \
M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_USERS, "RELOAD USERS", GLOBAL, SYSTEM_RELOAD) \


@ -9,13 +9,12 @@
#include <IO/WriteBufferFromS3.h>
#include <IO/HTTPHeaderEntries.h>
#include <IO/S3/copyS3File.h>
#include <IO/S3/Client.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <aws/core/auth/AWSCredentials.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
#include <aws/s3/model/ListObjectsRequest.h>
#include <filesystem>
@ -31,7 +30,7 @@ namespace ErrorCodes
namespace
{
std::shared_ptr<Aws::S3::S3Client>
std::shared_ptr<S3::Client>
makeS3Client(const S3::URI & s3_uri, const String & access_key_id, const String & secret_access_key, const ContextPtr & context)
{
auto settings = context->getStorageS3Settings().getSettings(s3_uri.uri.toString());
@ -71,9 +70,9 @@ namespace
context->getConfigRef().getBool("s3.use_insecure_imds_request", false)));
}
Aws::Vector<Aws::S3::Model::Object> listObjects(Aws::S3::S3Client & client, const S3::URI & s3_uri, const String & file_name)
Aws::Vector<Aws::S3::Model::Object> listObjects(S3::Client & client, const S3::URI & s3_uri, const String & file_name)
{
Aws::S3::Model::ListObjectsRequest request;
S3::ListObjectsRequest request;
request.SetBucket(s3_uri.bucket);
request.SetPrefix(fs::path{s3_uri.key} / file_name);
request.SetMaxKeys(1);
@ -228,7 +227,7 @@ std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
void BackupWriterS3::removeFile(const String & file_name)
{
Aws::S3::Model::DeleteObjectRequest request;
S3::DeleteObjectRequest request;
request.SetBucket(s3_uri.bucket);
request.SetKey(fs::path(s3_uri.key) / file_name);
auto outcome = client->DeleteObject(request);
@ -285,7 +284,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)
Aws::S3::Model::Delete delkeys;
delkeys.SetObjects(current_chunk);
Aws::S3::Model::DeleteObjectsRequest request;
S3::DeleteObjectsRequest request;
request.SetBucket(s3_uri.bucket);
request.SetDelete(delkeys);


@ -7,7 +7,6 @@
#include <IO/ReadSettings.h>
#include <IO/S3Common.h>
#include <Storages/StorageS3Settings.h>
#include <aws/s3/S3Client.h>
namespace DB
@ -27,7 +26,7 @@ public:
private:
S3::URI s3_uri;
std::shared_ptr<Aws::S3::S3Client> client;
std::shared_ptr<S3::Client> client;
ReadSettings read_settings;
S3Settings::RequestSettings request_settings;
};
@ -73,7 +72,7 @@ private:
void removeFilesBatch(const Strings & file_names);
S3::URI s3_uri;
std::shared_ptr<Aws::S3::S3Client> client;
std::shared_ptr<S3::Client> client;
ReadSettings read_settings;
S3Settings::RequestSettings request_settings;
Poco::Logger * log;


@ -271,16 +271,22 @@ size_t BackupImpl::getNumFiles() const
return num_files;
}
size_t BackupImpl::getNumProcessedFiles() const
UInt64 BackupImpl::getTotalSize() const
{
std::lock_guard lock{mutex};
return num_processed_files;
return total_size;
}
UInt64 BackupImpl::getProcessedFilesSize() const
size_t BackupImpl::getNumEntries() const
{
std::lock_guard lock{mutex};
return processed_files_size;
return num_entries;
}
UInt64 BackupImpl::getSizeOfEntries() const
{
std::lock_guard lock{mutex};
return size_of_entries;
}
UInt64 BackupImpl::getUncompressedSize() const
@ -295,6 +301,18 @@ UInt64 BackupImpl::getCompressedSize() const
return compressed_size;
}
size_t BackupImpl::getNumReadFiles() const
{
std::lock_guard lock{mutex};
return num_read_files;
}
UInt64 BackupImpl::getNumReadBytes() const
{
std::lock_guard lock{mutex};
return num_read_bytes;
}
void BackupImpl::writeBackupMetadata()
{
assert(!is_internal_backup);
@ -323,12 +341,18 @@ void BackupImpl::writeBackupMetadata()
}
}
size_t index = 0;
for (const auto & info : all_file_infos)
num_files = all_file_infos.size();
total_size = 0;
num_entries = 0;
size_of_entries = 0;
for (size_t i = 0; i != all_file_infos.size(); ++i)
{
String prefix = index ? "contents.file[" + std::to_string(index) + "]." : "contents.file.";
const auto & info = all_file_infos[i];
String prefix = i ? "contents.file[" + std::to_string(i) + "]." : "contents.file.";
config->setString(prefix + "name", info.file_name);
config->setUInt64(prefix + "size", info.size);
if (info.size)
{
config->setString(prefix + "checksum", hexChecksum(info.checksum));
@ -348,8 +372,14 @@ void BackupImpl::writeBackupMetadata()
if (info.pos_in_archive != static_cast<size_t>(-1))
config->setUInt64(prefix + "pos_in_archive", info.pos_in_archive);
}
increaseUncompressedSize(info);
++index;
total_size += info.size;
bool has_entry = !deduplicate_files || (info.size && (info.size != info.base_size) && (info.data_file_name.empty() || (info.data_file_name == info.file_name)));
if (has_entry)
{
++num_entries;
size_of_entries += info.size - info.base_size;
}
}
std::ostringstream stream; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
@ -366,8 +396,7 @@ void BackupImpl::writeBackupMetadata()
out->write(str.data(), str.size());
out->finalize();
increaseUncompressedSize(str.size());
increaseProcessedSize(str.size());
uncompressed_size = size_of_entries + str.size();
}
@ -392,8 +421,6 @@ void BackupImpl::readBackupMetadata()
String str;
readStringUntilEOF(str, *in);
increaseUncompressedSize(str.size());
increaseProcessedSize(str.size());
Poco::XML::DOMParser dom_parser;
Poco::AutoPtr<Poco::XML::Document> config = dom_parser.parseMemory(str.data(), str.size());
const Poco::XML::Node * config_root = getRootNode(config);
@ -412,6 +439,11 @@ void BackupImpl::readBackupMetadata()
if (config_root->getNodeByPath("base_backup_uuid"))
base_backup_uuid = parse<UUID>(getString(config_root, "base_backup_uuid"));
num_files = 0;
total_size = 0;
num_entries = 0;
size_of_entries = 0;
const auto * contents = config_root->getNodeByPath("contents");
for (const Poco::XML::Node * child = contents->firstChild(); child; child = child->nextSibling())
{
@ -456,10 +488,20 @@ void BackupImpl::readBackupMetadata()
}
coordination->addFileInfo(info);
increaseUncompressedSize(info);
++num_files;
total_size += info.size;
bool has_entry = !deduplicate_files || (info.size && (info.size != info.base_size) && (info.data_file_name.empty() || (info.data_file_name == info.file_name)));
if (has_entry)
{
++num_entries;
size_of_entries += info.size - info.base_size;
}
}
}
uncompressed_size = size_of_entries + str.size();
compressed_size = uncompressed_size;
if (!use_archives)
setCompressedSize();
}
@ -612,7 +654,8 @@ BackupEntryPtr BackupImpl::readFile(const SizeAndChecksum & size_and_checksum) c
if (open_mode != OpenMode::READ)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Backup is not opened for reading");
increaseProcessedSize(size_and_checksum.first);
++num_read_files;
num_read_bytes += size_and_checksum.first;
if (!size_and_checksum.first)
{
@ -780,7 +823,8 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
{
std::lock_guard lock{mutex};
increaseProcessedSize(info);
++num_files;
total_size += info.size;
}
/// Empty file, nothing to backup
@ -909,7 +953,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
{
LOG_TRACE(log, "Will copy file {}", adjusted_path);
if (!num_files_written)
if (!num_entries)
checkLockFile(true);
if (use_archives)
@ -951,7 +995,12 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
}
}
++num_files_written;
{
std::lock_guard lock{mutex};
++num_entries;
size_of_entries += info.size - info.base_size;
uncompressed_size += info.size - info.base_size;
}
}
@ -981,29 +1030,6 @@ void BackupImpl::finalizeWriting()
}
void BackupImpl::increaseUncompressedSize(UInt64 file_size)
{
uncompressed_size += file_size;
++num_files;
}
void BackupImpl::increaseUncompressedSize(const FileInfo & info)
{
if ((info.size > info.base_size) && (info.data_file_name.empty() || (info.data_file_name == info.file_name)))
increaseUncompressedSize(info.size - info.base_size);
}
void BackupImpl::increaseProcessedSize(UInt64 file_size) const
{
processed_files_size += file_size;
++num_processed_files;
}
void BackupImpl::increaseProcessedSize(const FileInfo & info)
{
increaseProcessedSize(info.size);
}
void BackupImpl::setCompressedSize()
{
if (use_archives)


@ -59,10 +59,13 @@ public:
time_t getTimestamp() const override { return timestamp; }
UUID getUUID() const override { return *uuid; }
size_t getNumFiles() const override;
size_t getNumProcessedFiles() const override;
UInt64 getProcessedFilesSize() const override;
UInt64 getTotalSize() const override;
size_t getNumEntries() const override;
UInt64 getSizeOfEntries() const override;
UInt64 getUncompressedSize() const override;
UInt64 getCompressedSize() const override;
size_t getNumReadFiles() const override;
UInt64 getNumReadBytes() const override;
Strings listFiles(const String & directory, bool recursive) const override;
bool hasFiles(const String & directory) const override;
bool fileExists(const String & file_name) const override;
@ -103,16 +106,6 @@ private:
std::shared_ptr<IArchiveReader> getArchiveReader(const String & suffix) const;
std::shared_ptr<IArchiveWriter> getArchiveWriter(const String & suffix);
/// Increases `uncompressed_size` by a specific value,
/// also increases `num_files` by 1.
void increaseUncompressedSize(UInt64 file_size);
void increaseUncompressedSize(const FileInfo & info);
/// Increases `num_processed_files` by a specific value,
/// also increases `num_processed_files` by 1.
void increaseProcessedSize(UInt64 file_size) const;
void increaseProcessedSize(const FileInfo & info);
/// Calculates and sets `compressed_size`.
void setCompressedSize();
@ -129,10 +122,13 @@ private:
std::optional<UUID> uuid;
time_t timestamp = 0;
size_t num_files = 0;
mutable size_t num_processed_files = 0;
mutable UInt64 processed_files_size = 0;
UInt64 total_size = 0;
size_t num_entries = 0;
UInt64 size_of_entries = 0;
UInt64 uncompressed_size = 0;
UInt64 compressed_size = 0;
mutable size_t num_read_files = 0;
mutable UInt64 num_read_bytes = 0;
int version;
std::optional<BackupInfo> base_backup_info;
std::shared_ptr<const IBackup> base_backup;
@ -141,7 +137,6 @@ private:
std::pair<String, std::shared_ptr<IArchiveWriter>> archive_writers[2];
String current_archive_suffix;
String lock_file_name;
std::atomic<size_t> num_files_written = 0;
bool writing_finalized = false;
bool deduplicate_files = true;
const Poco::Logger * log;


@ -338,20 +338,20 @@ void BackupsWorker::doBackup(
}
size_t num_files = 0;
size_t num_processed_files = 0;
UInt64 total_size = 0;
size_t num_entries = 0;
UInt64 uncompressed_size = 0;
UInt64 compressed_size = 0;
UInt64 processed_files_size = 0;
/// Finalize backup (write its metadata).
if (!backup_settings.internal)
{
backup->finalizeWriting();
num_files = backup->getNumFiles();
num_processed_files = backup->getNumProcessedFiles();
total_size = backup->getTotalSize();
num_entries = backup->getNumEntries();
uncompressed_size = backup->getUncompressedSize();
compressed_size = backup->getCompressedSize();
processed_files_size = backup->getProcessedFilesSize();
}
/// Close the backup.
@ -359,7 +359,7 @@ void BackupsWorker::doBackup(
LOG_INFO(log, "{} {} was created successfully", (backup_settings.internal ? "Internal backup" : "Backup"), backup_name_for_logging);
setStatus(backup_id, BackupStatus::BACKUP_CREATED);
setNumFilesAndSize(backup_id, num_files, num_processed_files, processed_files_size, uncompressed_size, compressed_size);
setNumFilesAndSize(backup_id, num_files, total_size, num_entries, uncompressed_size, compressed_size, 0, 0);
}
catch (...)
{
@ -583,10 +583,12 @@ void BackupsWorker::doRestore(
setNumFilesAndSize(
restore_id,
backup->getNumFiles(),
backup->getNumProcessedFiles(),
backup->getProcessedFilesSize(),
backup->getTotalSize(),
backup->getNumEntries(),
backup->getUncompressedSize(),
backup->getCompressedSize());
backup->getCompressedSize(),
backup->getNumReadFiles(),
backup->getNumReadBytes());
}
catch (...)
{
@ -667,7 +669,9 @@ void BackupsWorker::setStatus(const String & id, BackupStatus status, bool throw
}
void BackupsWorker::setNumFilesAndSize(const String & id, size_t num_files, size_t num_processed_files, UInt64 processed_files_size, UInt64 uncompressed_size, UInt64 compressed_size)
void BackupsWorker::setNumFilesAndSize(const OperationID & id, size_t num_files, UInt64 total_size, size_t num_entries,
UInt64 uncompressed_size, UInt64 compressed_size, size_t num_read_files, UInt64 num_read_bytes)
{
std::lock_guard lock{infos_mutex};
auto it = infos.find(id);
@ -676,10 +680,12 @@ void BackupsWorker::setNumFilesAndSize(const String & id, size_t num_files, size
auto & info = it->second;
info.num_files = num_files;
info.num_processed_files = num_processed_files;
info.processed_files_size = processed_files_size;
info.total_size = total_size;
info.num_entries = num_entries;
info.uncompressed_size = uncompressed_size;
info.compressed_size = compressed_size;
info.num_read_files = num_read_files;
info.num_read_bytes = num_read_bytes;
}


@ -53,23 +53,27 @@ public:
/// Status of backup or restore operation.
BackupStatus status;
/// Number of files in the backup (including backup's metadata; only unique files are counted).
/// The number of files stored in the backup.
size_t num_files = 0;
/// Number of processed files during backup or restore process
/// For restore it includes files from base backups
size_t num_processed_files = 0;
/// The total size of files stored in the backup.
UInt64 total_size = 0;
/// Size of processed files during backup or restore
/// For restore in includes sizes from base backups
UInt64 processed_files_size = 0;
/// The number of entries in the backup, i.e. the number of files inside the folder if the backup is stored as a folder.
size_t num_entries = 0;
/// Size of all files in the backup (including backup's metadata; only unique files are counted).
/// The uncompressed size of the backup.
UInt64 uncompressed_size = 0;
/// Size of the backup if it's stored as an archive; or the same as `uncompressed_size` if the backup is stored as a folder.
/// The compressed size of the backup.
UInt64 compressed_size = 0;
/// Returns the number of files read during RESTORE from this backup.
size_t num_read_files = 0;
// Returns the total size of files read during RESTORE from this backup.
UInt64 num_read_bytes = 0;
/// Set only if there was an error.
std::exception_ptr exception;
String error_message;
@ -110,7 +114,9 @@ private:
void addInfo(const OperationID & id, const String & name, bool internal, BackupStatus status);
void setStatus(const OperationID & id, BackupStatus status, bool throw_if_error = true);
void setStatusSafe(const String & id, BackupStatus status) { setStatus(id, status, false); }
void setNumFilesAndSize(const OperationID & id, size_t num_files, size_t num_processed_files, UInt64 processed_files_size, UInt64 uncompressed_size, UInt64 compressed_size);
void setNumFilesAndSize(const OperationID & id, size_t num_files, UInt64 total_size, size_t num_entries,
UInt64 uncompressed_size, UInt64 compressed_size, size_t num_read_files, UInt64 num_read_bytes);
std::vector<Info> getAllActiveBackupInfos() const;
std::vector<Info> getAllActiveRestoreInfos() const;
bool hasConcurrentBackups(const BackupSettings & backup_settings) const;


@ -37,21 +37,38 @@ public:
/// Returns UUID of the backup.
virtual UUID getUUID() const = 0;
/// Returns the number of unique files in the backup.
/// Returns the number of files stored in the backup. Compare with getNumEntries().
virtual size_t getNumFiles() const = 0;
/// Returns the number of files were processed for backup or restore
virtual size_t getNumProcessedFiles() const = 0;
/// Returns the total size of files stored in the backup. Compare with getTotalSizeOfEntries().
virtual UInt64 getTotalSize() const = 0;
// Returns the total size of processed files for backup or restore
virtual UInt64 getProcessedFilesSize() const = 0;
/// Returns the number of entries in the backup, i.e. the number of files inside the folder if the backup is stored as a folder or
/// the number of files inside the archive if the backup is stored as an archive.
/// It's not the same as getNumFiles() if it's an incremental backups or if it contains empty files or duplicates.
/// The following is always true: `getNumEntries() <= getNumFiles()`.
virtual size_t getNumEntries() const = 0;
/// Returns the total size of unique files in the backup.
/// Returns the size of entries in the backup, i.e. the total size of files inside the folder if the backup is stored as a folder or
/// the total size of files inside the archive if the backup is stored as an archive.
/// It's not the same as getTotalSize() because it doesn't include the size of duplicates and the size of files from the base backup.
/// The following is always true: `getSizeOfEntries() <= getTotalSize()`.
virtual UInt64 getSizeOfEntries() const = 0;
/// Returns the uncompressed size of the backup. It equals to `getSizeOfEntries() + size_of_backup_metadata (.backup)`
virtual UInt64 getUncompressedSize() const = 0;
/// Returns the compressed size of the backup. If the backup is not stored as an archive it returns the same as getUncompressedSize().
/// Returns the compressed size of the backup. If the backup is not stored as an archive it's the same as getUncompressedSize().
virtual UInt64 getCompressedSize() const = 0;
/// Returns the number of files read during RESTORE from this backup.
/// The following is always true: `getNumFilesRead() <= getNumFiles()`.
virtual size_t getNumReadFiles() const = 0;
// Returns the total size of files read during RESTORE from this backup.
/// The following is always true: `getNumReadBytes() <= getTotalSize()`.
virtual UInt64 getNumReadBytes() const = 0;
/// Returns names of entries stored in a specified directory in the backup.
/// If `directory` is empty or '/' the function returns entries in the backup's root.
virtual Strings listFiles(const String & directory, bool recursive = false) const = 0;
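
The size/count accessors above are tied together by the invariants stated in their comments (entries de-duplicate files and exclude data reused from the base backup). A minimal sketch of those invariants expressed as sanity checks, assuming only the IBackup interface shown here and an assumed header path:

#include <cassert>
#include <Backups/IBackup.h>   // assumed header path for the interface above

// Hypothetical helper: verifies the documented relations between the counters.
void checkBackupCounters(const DB::IBackup & backup)
{
    // Entries de-duplicate files and exclude data taken from the base backup.
    assert(backup.getNumEntries() <= backup.getNumFiles());
    assert(backup.getSizeOfEntries() <= backup.getTotalSize());

    // The uncompressed size additionally includes the .backup metadata file.
    assert(backup.getUncompressedSize() >= backup.getSizeOfEntries());

    // RESTORE can never read more than the backup contains.
    assert(backup.getNumReadFiles() <= backup.getNumFiles());
    assert(backup.getNumReadBytes() <= backup.getTotalSize());
}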

View File

@ -11,7 +11,7 @@
namespace DB
{
class CatBoostLibraryBridgeHelper : public LibraryBridgeHelper
class CatBoostLibraryBridgeHelper final : public LibraryBridgeHelper
{
public:
static constexpr inline auto PING_HANDLER = "/catboost_ping";

View File

@ -14,7 +14,7 @@ namespace DB
class Pipe;
// Class to access the external dictionary part of the clickhouse-library-bridge.
class ExternalDictionaryLibraryBridgeHelper : public LibraryBridgeHelper
class ExternalDictionaryLibraryBridgeHelper final : public LibraryBridgeHelper
{
public:

View File

@ -343,8 +343,8 @@ set_source_files_properties(
PROPERTIES COMPILE_FLAGS "-mwaitpkg")
endif ()
target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::re2_st)
target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::re2)
target_link_libraries(common PUBLIC ch_contrib::re2_st)
target_link_libraries(common PUBLIC ch_contrib::re2)
target_link_libraries(clickhouse_common_io
PUBLIC

View File

@ -686,7 +686,7 @@ void Connection::sendReadTaskResponse(const String & response)
}
void Connection::sendMergeTreeReadTaskResponse(const PartitionReadResponse & response)
void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & response)
{
writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out);
response.serialize(*out);
@ -960,8 +960,12 @@ Packet Connection::receivePacket()
case Protocol::Server::ReadTaskRequest:
return res;
case Protocol::Server::MergeTreeAllRangesAnnounecement:
res.announcement = receiveInitialParallelReadAnnounecement();
return res;
case Protocol::Server::MergeTreeReadTaskRequest:
res.request = receivePartitionReadRequest();
res.request = receiveParallelReadRequest();
return res;
case Protocol::Server::ProfileEvents:
@ -1114,13 +1118,20 @@ ProfileInfo Connection::receiveProfileInfo() const
return profile_info;
}
PartitionReadRequest Connection::receivePartitionReadRequest() const
ParallelReadRequest Connection::receiveParallelReadRequest() const
{
PartitionReadRequest request;
ParallelReadRequest request;
request.deserialize(*in);
return request;
}
InitialAllRangesAnnouncement Connection::receiveInitialParallelReadAnnounecement() const
{
InitialAllRangesAnnouncement announcement;
announcement.deserialize(*in);
return announcement;
}
void Connection::throwUnexpectedPacket(UInt64 packet_type, const char * expected) const
{

View File

@ -110,7 +110,7 @@ public:
void sendData(const Block & block, const String & name/* = "" */, bool scalar/* = false */) override;
void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) override;
void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;
void sendExternalTablesData(ExternalTablesData & data) override;
@ -265,7 +265,8 @@ private:
std::vector<String> receiveMultistringMessage(UInt64 msg_type) const;
std::unique_ptr<Exception> receiveException() const;
Progress receiveProgress() const;
PartitionReadRequest receivePartitionReadRequest() const;
ParallelReadRequest receiveParallelReadRequest() const;
InitialAllRangesAnnouncement receiveInitialParallelReadAnnounecement() const;
ProfileInfo receiveProfileInfo() const;
void initInputBuffers();

View File

@ -94,7 +94,7 @@ public:
throw Exception(ErrorCodes::LOGICAL_ERROR, "sendReadTaskResponse in not supported with HedgedConnections");
}
void sendMergeTreeReadTaskResponse(PartitionReadResponse) override
void sendMergeTreeReadTaskResponse(const ParallelReadResponse &) override
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "sendMergeTreeReadTaskResponse in not supported with HedgedConnections");
}

View File

@ -34,7 +34,7 @@ public:
bool with_pending_data) = 0;
virtual void sendReadTaskResponse(const String &) = 0;
virtual void sendMergeTreeReadTaskResponse(PartitionReadResponse response) = 0;
virtual void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) = 0;
/// Get packet from any replica.
virtual Packet receivePacket() = 0;
@ -60,9 +60,9 @@ public:
/// Get the replica addresses as a string.
virtual std::string dumpAddresses() const = 0;
struct ReplicaInfo
{
bool collaborate_with_initiator{false};
size_t all_replicas_count{0};
size_t number_of_current_replica{0};
};

View File

@ -33,8 +33,10 @@ struct Packet
Progress progress;
ProfileInfo profile_info;
std::vector<UUID> part_uuids;
PartitionReadRequest request;
PartitionReadResponse response;
InitialAllRangesAnnouncement announcement;
ParallelReadRequest request;
ParallelReadResponse response;
Packet() : type(Protocol::Server::Hello) {}
};
@ -104,7 +106,7 @@ public:
/// Send all contents of external (temporary) tables.
virtual void sendExternalTablesData(ExternalTablesData & data) = 0;
virtual void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) = 0;
virtual void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) = 0;
/// Check, if has data to read.
virtual bool poll(size_t timeout_microseconds) = 0;

View File

@ -508,7 +508,7 @@ void LocalConnection::sendExternalTablesData(ExternalTablesData &)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
}
void LocalConnection::sendMergeTreeReadTaskResponse(const PartitionReadResponse &)
void LocalConnection::sendMergeTreeReadTaskResponse(const ParallelReadResponse &)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented");
}

View File

@ -110,7 +110,7 @@ public:
void sendExternalTablesData(ExternalTablesData &) override;
void sendMergeTreeReadTaskResponse(const PartitionReadResponse & response) override;
void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;
bool poll(size_t timeout_microseconds/* = 0 */) override;

View File

@ -133,16 +133,11 @@ void MultiplexedConnections::sendQuery(
modified_settings.group_by_two_level_threshold_bytes = 0;
}
bool parallel_reading_from_replicas = settings.max_parallel_replicas > 1
&& settings.allow_experimental_parallel_reading_from_replicas
/// To avoid trying to coordinate with clickhouse-benchmark,
/// since it uses the same code.
&& client_info.query_kind != ClientInfo::QueryKind::INITIAL_QUERY;
if (parallel_reading_from_replicas)
if (replica_info)
{
client_info.collaborate_with_initiator = true;
client_info.count_participating_replicas = replica_info.all_replicas_count;
client_info.number_of_current_replica = replica_info.number_of_current_replica;
client_info.count_participating_replicas = replica_info->all_replicas_count;
client_info.number_of_current_replica = replica_info->number_of_current_replica;
}
}
@ -199,7 +194,7 @@ void MultiplexedConnections::sendReadTaskResponse(const String & response)
}
void MultiplexedConnections::sendMergeTreeReadTaskResponse(PartitionReadResponse response)
void MultiplexedConnections::sendMergeTreeReadTaskResponse(const ParallelReadResponse & response)
{
std::lock_guard lock(cancel_mutex);
if (cancelled)
@ -263,6 +258,7 @@ Packet MultiplexedConnections::drain()
switch (packet.type)
{
case Protocol::Server::MergeTreeAllRangesAnnounecement:
case Protocol::Server::MergeTreeReadTaskRequest:
case Protocol::Server::ReadTaskRequest:
case Protocol::Server::PartUUIDs:
@ -343,6 +339,7 @@ Packet MultiplexedConnections::receivePacketUnlocked(AsyncCallback async_callbac
switch (packet.type)
{
case Protocol::Server::MergeTreeAllRangesAnnounecement:
case Protocol::Server::MergeTreeReadTaskRequest:
case Protocol::Server::ReadTaskRequest:
case Protocol::Server::PartUUIDs:

View File

@ -42,7 +42,7 @@ public:
bool with_pending_data) override;
void sendReadTaskResponse(const String &) override;
void sendMergeTreeReadTaskResponse(PartitionReadResponse response) override;
void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;
Packet receivePacket() override;
@ -104,7 +104,8 @@ private:
bool sent_query = false;
bool cancelled = false;
ReplicaInfo replica_info;
/// std::nullopt if parallel reading from replicas is not used
std::optional<ReplicaInfo> replica_info;
/// A mutex for the sendCancel function to execute safely
/// in separate thread.

View File

@ -417,6 +417,10 @@ ReplxxLineReader::ReplxxLineReader(
{
rx.print("skim failed: %s (consider using Ctrl-T for a regular non-fuzzy reverse search)\n", e.what());
}
/// REPAINT beforehand to avoid the prompt being overlapped by the query
rx.invoke(Replxx::ACTION::REPAINT, code);
if (!new_query.empty())
rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));

View File

@ -14,76 +14,82 @@ namespace OpenTelemetry
thread_local TracingContextOnThread current_thread_trace_context;
void Span::addAttribute(std::string_view name, UInt64 value)
bool Span::addAttribute(std::string_view name, UInt64 value) noexcept
{
if (!this->isTraceEnabled() || name.empty())
return;
return false;
this->attributes.push_back(Tuple{name, toString(value)});
return addAttributeImpl(name, toString(value));
}
void Span::addAttributeIfNotZero(std::string_view name, UInt64 value)
bool Span::addAttributeIfNotZero(std::string_view name, UInt64 value) noexcept
{
if (value != 0)
addAttribute(name, value);
if (!this->isTraceEnabled() || name.empty() || value == 0)
return false;
return addAttributeImpl(name, toString(value));
}
void Span::addAttribute(std::string_view name, std::string_view value)
bool Span::addAttribute(std::string_view name, std::string_view value) noexcept
{
if (!this->isTraceEnabled() || name.empty())
return;
return false;
this->attributes.push_back(Tuple{name, value});
return addAttributeImpl(name, value);
}
void Span::addAttributeIfNotEmpty(std::string_view name, std::string_view value)
bool Span::addAttributeIfNotEmpty(std::string_view name, std::string_view value) noexcept
{
if (!this->isTraceEnabled() || name.empty() || value.empty())
return;
return false;
this->attributes.push_back(Tuple{name, value});
return addAttributeImpl(name, value);
}
void Span::addAttribute(std::string_view name, std::function<String()> value_supplier)
bool Span::addAttribute(std::string_view name, std::function<String()> value_supplier) noexcept
{
if (!this->isTraceEnabled() || !value_supplier)
return;
if (!this->isTraceEnabled() || name.empty() || !value_supplier)
return false;
String value = value_supplier();
if (value.empty())
return;
this->attributes.push_back(Tuple{name, value});
try
{
auto value = value_supplier();
return value.empty() ? false : addAttributeImpl(name, value);
}
catch (...)
{
/// Ignore exception raised by value_supplier
return false;
}
}
void Span::addAttribute(const Exception & e) noexcept
bool Span::addAttribute(const Exception & e) noexcept
{
if (!this->isTraceEnabled())
return;
return false;
try
{
this->attributes.push_back(Tuple{"clickhouse.exception", getExceptionMessage(e, false)});
}
catch (...)
{
/// Ignore exceptions
}
return addAttributeImpl("clickhouse.exception", getExceptionMessage(e, false));
}
void Span::addAttribute(std::exception_ptr e) noexcept
bool Span::addAttribute(std::exception_ptr e) noexcept
{
if (!this->isTraceEnabled() || e == nullptr)
return;
return false;
return addAttributeImpl("clickhouse.exception", getExceptionMessage(e, false));
}
bool Span::addAttributeImpl(std::string_view name, std::string_view value) noexcept
{
try
{
this->attributes.push_back(Tuple{"clickhouse.exception", getExceptionMessage(e, false)});
this->attributes.push_back(Tuple{name, value});
}
catch (...)
{
/// Ignore exceptions
return false;
}
return true;
}
SpanHolder::SpanHolder(std::string_view _operation_name)

View File

@ -23,21 +23,24 @@ struct Span
UInt64 finish_time_us = 0;
Map attributes;
void addAttribute(std::string_view name, UInt64 value);
void addAttributeIfNotZero(std::string_view name, UInt64 value);
void addAttribute(std::string_view name, std::string_view value);
void addAttributeIfNotEmpty(std::string_view name, std::string_view value);
void addAttribute(std::string_view name, std::function<String()> value_supplier);
/// Following two methods are declared as noexcept to make sure they're exception safe
/// This is because they're usually called in exception handler
void addAttribute(const Exception & e) noexcept;
void addAttribute(std::exception_ptr e) noexcept;
/// The following methods are declared noexcept to make sure they're exception safe,
/// because they may be called from exception handlers or destructors.
/// They return true if the attribute was successfully added and false otherwise.
bool addAttribute(std::string_view name, UInt64 value) noexcept;
bool addAttributeIfNotZero(std::string_view name, UInt64 value) noexcept;
bool addAttribute(std::string_view name, std::string_view value) noexcept;
bool addAttributeIfNotEmpty(std::string_view name, std::string_view value) noexcept;
bool addAttribute(std::string_view name, std::function<String()> value_supplier) noexcept;
bool addAttribute(const Exception & e) noexcept;
bool addAttribute(std::exception_ptr e) noexcept;
bool isTraceEnabled() const
{
return trace_id != UUID();
}
private:
bool addAttributeImpl(std::string_view name, std::string_view value) noexcept;
};
/// See https://www.w3.org/TR/trace-context/ for trace_flags definition
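
Since the addAttribute family is now noexcept and reports success through its return value, it can be used safely from exception handlers. A small usage sketch, assuming SpanHolder (constructed in the .cpp hunk above) exposes the Span interface; the attribute names are illustrative only:

#include <Common/OpenTelemetryTraceContext.h>
#include <cstddef>
#include <exception>

void tracedWork(size_t rows_read)
{
    DB::OpenTelemetry::SpanHolder span("tracedWork");

    // These are no-ops (returning false) when tracing is disabled, the name is
    // empty, or the value is zero - and they never throw.
    span.addAttribute("db.statement.kind", "SELECT");
    span.addAttributeIfNotZero("db.rows_read", rows_read);

    try
    {
        // ... actual work ...
    }
    catch (...)
    {
        // Safe inside a catch block; returns whether the attribute was stored.
        span.addAttribute(std::current_exception());
        throw;
    }
}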

View File

@ -3,6 +3,7 @@
#include <Common/Exception.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <Common/OpenTelemetryTraceContext.h>
#include <Common/noexcept_scope.h>
#include <cassert>
#include <iostream>
@ -209,6 +210,7 @@ ThreadPoolImpl<Thread>::~ThreadPoolImpl()
/// and the destruction order of global variables is unspecified.
finalize();
onDestroy();
}
template <typename Thread>
@ -227,6 +229,24 @@ void ThreadPoolImpl<Thread>::finalize()
threads.clear();
}
template <typename Thread>
void ThreadPoolImpl<Thread>::addOnDestroyCallback(OnDestroyCallback && callback)
{
std::lock_guard lock(mutex);
on_destroy_callbacks.push(std::move(callback));
}
template <typename Thread>
void ThreadPoolImpl<Thread>::onDestroy()
{
while (!on_destroy_callbacks.empty())
{
auto callback = std::move(on_destroy_callbacks.top());
on_destroy_callbacks.pop();
NOEXCEPT_SCOPE({ callback(); });
}
}
template <typename Thread>
size_t ThreadPoolImpl<Thread>::active() const
{

View File

@ -9,6 +9,7 @@
#include <list>
#include <optional>
#include <atomic>
#include <stack>
#include <boost/heap/priority_queue.hpp>
@ -80,6 +81,16 @@ public:
void setQueueSize(size_t value);
size_t getMaxThreads() const;
/// Adds a callback which is called in the destructor after
/// all threads have been joined. Callbacks are invoked in the
/// reverse order of their addition.
/// This can be useful for static thread pools that need to run
/// a function after their threads are joined, because the order
/// of destruction of global static objects and of callbacks
/// added by atexit is undefined across translation units.
using OnDestroyCallback = std::function<void()>;
void addOnDestroyCallback(OnDestroyCallback && callback);
private:
mutable std::mutex mutex;
std::condition_variable job_finished;
@ -111,6 +122,7 @@ private:
boost::heap::priority_queue<JobWithPriority> jobs;
std::list<Thread> threads;
std::exception_ptr first_exception;
std::stack<OnDestroyCallback> on_destroy_callbacks;
template <typename ReturnType>
ReturnType scheduleImpl(Job job, ssize_t priority, std::optional<uint64_t> wait_microseconds, bool propagate_opentelemetry_tracing_context = true);
@ -118,6 +130,7 @@ private:
void worker(typename std::list<Thread>::iterator thread_it);
void finalize();
void onDestroy();
};
@ -146,7 +159,8 @@ class GlobalThreadPool : public FreeThreadPool, private boost::noncopyable
size_t queue_size_, const bool shutdown_on_exception_)
: FreeThreadPool(max_threads_, max_free_threads_, queue_size_,
shutdown_on_exception_)
{}
{
}
public:
static void initialize(size_t max_threads = 10000, size_t max_free_threads = 1000, size_t queue_size = 10000);
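
Putting the new hook together: a minimal sketch (logger name and call site are assumptions) of registering a destruction callback on the global pool, which runs only after every worker thread has been joined:

#include <Common/ThreadPool.h>
#include <Common/logger_useful.h>

void registerPoolShutdownHook()
{
    // Assumes GlobalThreadPool::initialize() was already called at startup.
    GlobalThreadPool::instance().addOnDestroyCallback([]
    {
        // Invoked from ~ThreadPoolImpl() after finalize() has joined all threads;
        // callbacks run in reverse registration order inside NOEXCEPT_SCOPE,
        // so they must not throw.
        LOG_INFO(&Poco::Logger::get("PoolShutdown"), "All global pool threads joined");
    });
}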

View File

@ -17,7 +17,7 @@ namespace DB
* Disadvantages:
* - if you need to read a lot of data in a row but only part of it is cached, you have to do extra seeks.
*/
class CachedCompressedReadBuffer : public CompressedReadBufferBase, public ReadBuffer
class CachedCompressedReadBuffer final : public CompressedReadBufferBase, public ReadBuffer
{
private:
std::function<std::unique_ptr<ReadBufferFromFileBase>()> file_in_creator;

View File

@ -11,7 +11,7 @@ namespace DB
/** A buffer for reading from a compressed file with just checking checksums of
* the compressed blocks, without any decompression.
*/
class CheckingCompressedReadBuffer : public CompressedReadBufferBase, public ReadBuffer
class CheckingCompressedReadBuffer final : public CompressedReadBufferBase, public ReadBuffer
{
protected:
bool nextImpl() override;

View File

@ -8,7 +8,7 @@
namespace DB
{
class CompressedReadBuffer : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
class CompressedReadBuffer final : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
{
private:
size_t size_compressed = 0;

View File

@ -14,7 +14,7 @@ class MMappedFileCache;
/// Unlike CompressedReadBuffer, it can do seek.
class CompressedReadBufferFromFile : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
class CompressedReadBufferFromFile final : public CompressedReadBufferBase, public BufferWithOwnMemory<ReadBuffer>
{
private:
/** At any time, one of two things is true:

View File

@ -13,7 +13,7 @@
namespace DB
{
class CompressedWriteBuffer : public BufferWithOwnMemory<WriteBuffer>
class CompressedWriteBuffer final : public BufferWithOwnMemory<WriteBuffer>
{
public:
explicit CompressedWriteBuffer(

View File

@ -87,7 +87,7 @@ private:
Poco::Logger * log;
};
class CompressionCodecDeflateQpl : public ICompressionCodec
class CompressionCodecDeflateQpl final : public ICompressionCodec
{
public:
CompressionCodecDeflateQpl();

View File

@ -44,7 +44,7 @@ enum EncryptionMethod
* as otherwise our engines like ReplicatedMergeTree cannot
* deduplicate data blocks.
*/
class CompressionCodecEncrypted : public ICompressionCodec
class CompressionCodecEncrypted final : public ICompressionCodec
{
public:
/** If a key is available, the server is supposed to

View File

@ -8,7 +8,7 @@
namespace DB
{
class CompressionCodecNone : public ICompressionCodec
class CompressionCodecNone final : public ICompressionCodec
{
public:
CompressionCodecNone();

View File

@ -17,10 +17,7 @@
#include <Common/Macros.h>
#include <aws/core/auth/AWSCredentials.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/S3Errors.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <filesystem>
@ -31,7 +28,7 @@ namespace DB
struct KeeperSnapshotManagerS3::S3Configuration
{
S3Configuration(S3::URI uri_, S3::AuthSettings auth_settings_, std::shared_ptr<const Aws::S3::S3Client> client_)
S3Configuration(S3::URI uri_, S3::AuthSettings auth_settings_, std::shared_ptr<const S3::Client> client_)
: uri(std::move(uri_))
, auth_settings(std::move(auth_settings_))
, client(std::move(client_))
@ -39,7 +36,7 @@ struct KeeperSnapshotManagerS3::S3Configuration
S3::URI uri;
S3::AuthSettings auth_settings;
std::shared_ptr<const Aws::S3::S3Client> client;
std::shared_ptr<const S3::Client> client;
};
KeeperSnapshotManagerS3::KeeperSnapshotManagerS3()
@ -202,7 +199,7 @@ void KeeperSnapshotManagerS3::uploadSnapshotImpl(const std::string & snapshot_pa
LOG_INFO(log, "Removing lock file");
try
{
Aws::S3::Model::DeleteObjectRequest delete_request;
S3::DeleteObjectRequest delete_request;
delete_request.SetBucket(s3_client->uri.bucket);
delete_request.SetKey(lock_file);
auto delete_outcome = s3_client->client->DeleteObject(delete_request);

View File

@ -81,7 +81,8 @@ namespace Protocol
/// This is inverted logic: the server sends requests
/// and the client sends back responses
ProfileEvents = 14, /// Packet with profile events from server.
MergeTreeReadTaskRequest = 15, /// Request from a MergeTree replica to a coordinator
MergeTreeAllRangesAnnounecement = 15,
MergeTreeReadTaskRequest = 16, /// Request from a MergeTree replica to a coordinator
MAX = MergeTreeReadTaskRequest,
};
@ -108,6 +109,7 @@ namespace Protocol
"PartUUIDs",
"ReadTaskRequest",
"ProfileEvents",
"MergeTreeAllRangesAnnounecement",
"MergeTreeReadTaskRequest",
};
return packet <= MAX

View File

@ -33,6 +33,8 @@
#define DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION 1
#define DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS 54453
#define DBMS_MERGE_TREE_PART_INFO_VERSION 1
/// Minimum revision supporting interserver secret.
#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441

View File

@ -151,7 +151,9 @@ class IColumn;
M(UInt64, parallel_replicas_count, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting is automatically set by the initiator server for distributed queries to the number of parallel replicas participating in query processing.", 0) \
M(UInt64, parallel_replica_offset, 0, "This is an internal setting that should not be used directly and represents an implementation detail of the 'parallel replicas' mode. This setting is automatically set by the initiator server for distributed queries to the index of the replica participating in query processing among parallel replicas.", 0) \
\
M(String, cluster_for_parallel_replicas, "default", "Cluster for a shard in which current server is located", 0) \
M(Bool, allow_experimental_parallel_reading_from_replicas, false, "If true, ClickHouse will send a SELECT query to all replicas of a table. It will work for any kind of MergeTree table.", 0) \
M(Float, parallel_replicas_single_task_marks_count_multiplier, 2, "A multiplier used when calculating the minimal number of marks to retrieve from the coordinator. Applied only to remote replicas.", 0) \
\
M(Bool, skip_unavailable_shards, false, "If true, ClickHouse silently skips unavailable shards and nodes unresolvable through DNS. Shard is marked as unavailable when none of the replicas can be reached.", 0) \
\
@ -603,7 +605,7 @@ class IColumn;
M(ShortCircuitFunctionEvaluation, short_circuit_function_evaluation, ShortCircuitFunctionEvaluation::ENABLE, "Setting for short-circuit function evaluation configuration. Possible values: 'enable' - use short-circuit function evaluation for functions that are suitable for it, 'disable' - disable short-circuit function evaluation, 'force_enable' - use short-circuit function evaluation for all functions.", 0) \
\
M(LocalFSReadMethod, storage_file_read_method, LocalFSReadMethod::mmap, "Method of reading data from storage file, one of: read, pread, mmap.", 0) \
M(String, local_filesystem_read_method, "pread_threadpool", "Method of reading data from local filesystem, one of: read, pread, mmap, io_uring, pread_threadpool.", 0) \
M(String, local_filesystem_read_method, "pread_threadpool", "Method of reading data from local filesystem, one of: read, pread, mmap, io_uring, pread_threadpool. The 'io_uring' method is experimental and does not work for Log, TinyLog, StripeLog, File, Set and Join, and other tables with appendable files in the presence of concurrent reads and writes.", 0) \
M(String, remote_filesystem_read_method, "threadpool", "Method of reading data from remote filesystem, one of: read, threadpool.", 0) \
M(Bool, local_filesystem_read_prefetch, false, "Should use prefetching when reading data from local filesystem.", 0) \
M(Bool, remote_filesystem_read_prefetch, true, "Should use prefetching when reading data from remote filesystem.", 0) \
@ -774,6 +776,7 @@ class IColumn;
M(Bool, input_format_tsv_detect_header, true, "Automatically detect header with names and types in TSV format", 0) \
M(Bool, input_format_custom_detect_header, true, "Automatically detect header with names and types in CustomSeparated format", 0) \
M(Bool, input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format Parquet", 0) \
M(UInt64, input_format_parquet_max_block_size, 8192, "Max block size for parquet reader.", 0) \
M(Bool, input_format_protobuf_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip fields with unsupported types while schema inference for format Protobuf", 0) \
M(Bool, input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format CapnProto", 0) \
M(Bool, input_format_orc_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types while schema inference for format ORC", 0) \

View File

@ -28,6 +28,7 @@ namespace Poco
{
namespace Util
{
/// NOLINTNEXTLINE(cppcoreguidelines-virtual-class-destructor)
class AbstractConfiguration;
}
}

View File

@ -21,10 +21,6 @@
#include <Interpreters/threadPoolCallbackRunner.h>
#include <Disks/ObjectStorages/S3/diskSettings.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
#include <Common/getRandomASCIIString.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>
@ -213,7 +209,7 @@ void S3ObjectStorage::findAllFiles(const std::string & path, RelativePathsWithSi
auto settings_ptr = s3_settings.get();
auto client_ptr = client.get();
Aws::S3::Model::ListObjectsV2Request request;
S3::ListObjectsV2Request request;
request.SetBucket(bucket);
request.SetPrefix(path);
if (max_keys)
@ -257,7 +253,7 @@ void S3ObjectStorage::getDirectoryContents(const std::string & path,
auto settings_ptr = s3_settings.get();
auto client_ptr = client.get();
Aws::S3::Model::ListObjectsV2Request request;
S3::ListObjectsV2Request request;
request.SetBucket(bucket);
/// NOTE: if you do "ls /foo" instead of "ls /foo/" over S3 with this API
/// it will return only "/foo" itself without any underlying nodes.
@ -304,7 +300,7 @@ void S3ObjectStorage::removeObjectImpl(const StoredObject & object, bool if_exis
ProfileEvents::increment(ProfileEvents::S3DeleteObjects);
ProfileEvents::increment(ProfileEvents::DiskS3DeleteObjects);
Aws::S3::Model::DeleteObjectRequest request;
S3::DeleteObjectRequest request;
request.SetBucket(bucket);
request.SetKey(object.absolute_path);
auto outcome = client_ptr->DeleteObject(request);
@ -352,7 +348,7 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e
ProfileEvents::increment(ProfileEvents::S3DeleteObjects);
ProfileEvents::increment(ProfileEvents::DiskS3DeleteObjects);
Aws::S3::Model::DeleteObjectsRequest request;
S3::DeleteObjectsRequest request;
request.SetBucket(bucket);
request.SetDelete(delkeys);
auto outcome = client_ptr->DeleteObjects(request);
@ -435,7 +431,7 @@ void S3ObjectStorage::setNewSettings(std::unique_ptr<S3ObjectStorageSettings> &&
s3_settings.set(std::move(s3_settings_));
}
void S3ObjectStorage::setNewClient(std::unique_ptr<Aws::S3::S3Client> && client_)
void S3ObjectStorage::setNewClient(std::unique_ptr<S3::Client> && client_)
{
client.set(std::move(client_));
}
@ -447,7 +443,7 @@ void S3ObjectStorage::shutdown()
/// If an S3 request fails and the method below is executed, the S3 client immediately returns the last failed S3 request outcome.
/// If S3 is healthy, nothing bad happens and S3 requests are processed in the regular way without errors.
/// This should significantly speed up the shutdown process if S3 is unhealthy.
const_cast<Aws::S3::S3Client &>(*client_ptr).DisableRequestProcessing();
const_cast<S3::Client &>(*client_ptr).DisableRequestProcessing();
}
void S3ObjectStorage::startup()
@ -455,7 +451,7 @@ void S3ObjectStorage::startup()
auto client_ptr = client.get();
/// Need to be enabled if it was disabled during shutdown() call.
const_cast<Aws::S3::S3Client &>(*client_ptr).EnableRequestProcessing();
const_cast<S3::Client &>(*client_ptr).EnableRequestProcessing();
}
void S3ObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context)

View File

@ -7,7 +7,6 @@
#include <Disks/ObjectStorages/IObjectStorage.h>
#include <Disks/ObjectStorages/S3/S3Capabilities.h>
#include <memory>
#include <aws/s3/S3Client.h>
#include <Storages/StorageS3Settings.h>
#include <Common/MultiVersion.h>
#include <Common/logger_useful.h>
@ -46,7 +45,7 @@ private:
S3ObjectStorage(
const char * logger_name,
std::unique_ptr<Aws::S3::S3Client> && client_,
std::unique_ptr<S3::Client> && client_,
std::unique_ptr<S3ObjectStorageSettings> && s3_settings_,
String version_id_,
const S3Capabilities & s3_capabilities_,
@ -68,7 +67,7 @@ private:
public:
template <class ...Args>
S3ObjectStorage(std::unique_ptr<Aws::S3::S3Client> && client_, Args && ...args)
explicit S3ObjectStorage(std::unique_ptr<S3::Client> && client_, Args && ...args)
: S3ObjectStorage("S3ObjectStorage", std::move(client_), std::forward<Args>(args)...)
{
}
@ -163,14 +162,14 @@ public:
private:
void setNewSettings(std::unique_ptr<S3ObjectStorageSettings> && s3_settings_);
void setNewClient(std::unique_ptr<Aws::S3::S3Client> && client_);
void setNewClient(std::unique_ptr<S3::Client> && client_);
void removeObjectImpl(const StoredObject & object, bool if_exists);
void removeObjectsImpl(const StoredObjects & objects, bool if_exists);
std::string bucket;
MultiVersion<Aws::S3::S3Client> client;
MultiVersion<S3::Client> client;
MultiVersion<S3ObjectStorageSettings> s3_settings;
S3Capabilities s3_capabilities;
@ -191,7 +190,7 @@ public:
std::string getName() const override { return "S3PlainObjectStorage"; }
template <class ...Args>
S3PlainObjectStorage(Args && ...args)
explicit S3PlainObjectStorage(Args && ...args)
: S3ObjectStorage("S3PlainObjectStorage", std::forward<Args>(args)...)
{
data_source_description.type = DataSourceType::S3_Plain;

View File

@ -107,7 +107,7 @@ std::shared_ptr<S3::ProxyConfiguration> getProxyConfiguration(const String & pre
}
std::unique_ptr<Aws::S3::S3Client> getClient(
std::unique_ptr<S3::Client> getClient(
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,

View File

@ -7,11 +7,13 @@
#include <Poco/Util/AbstractConfiguration.h>
#include <Interpreters/Context_fwd.h>
#include <IO/S3/Client.h>
namespace Aws
{
namespace S3
{
class S3Client;
class Client;
}
}
@ -22,7 +24,7 @@ struct S3ObjectStorageSettings;
std::unique_ptr<S3ObjectStorageSettings> getSettings(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context);
std::unique_ptr<Aws::S3::S3Client> getClient(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context, const S3ObjectStorageSettings & settings);
std::unique_ptr<S3::Client> getClient(const Poco::Util::AbstractConfiguration & config, const String & config_prefix, ContextPtr context, const S3ObjectStorageSettings & settings);
}

View File

@ -116,6 +116,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
format_settings.parquet.allow_missing_columns = settings.input_format_parquet_allow_missing_columns;
format_settings.parquet.skip_columns_with_unsupported_types_in_schema_inference = settings.input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference;
format_settings.parquet.output_string_as_string = settings.output_format_parquet_string_as_string;
format_settings.parquet.max_block_size = settings.input_format_parquet_max_block_size;
format_settings.pretty.charset = settings.output_format_pretty_grid_charset.toString() == "ASCII" ? FormatSettings::Pretty::Charset::ASCII : FormatSettings::Pretty::Charset::UTF8;
format_settings.pretty.color = settings.output_format_pretty_color;
format_settings.pretty.max_column_pad_width = settings.output_format_pretty_max_column_pad_width;

View File

@ -183,6 +183,7 @@ struct FormatSettings
bool case_insensitive_column_matching = false;
std::unordered_set<int> skip_row_groups = {};
bool output_string_as_string = false;
UInt64 max_block_size = 8192;
} parquet;
struct Pretty

View File

@ -377,7 +377,7 @@ struct ToDateTransform32Or64
static NO_SANITIZE_UNDEFINED ToType execute(const FromType & from, const DateLUTImpl & time_zone)
{
// Since we are converting to Date, there is no need for values outside of the default LUT range.
return (from <= DATE_LUT_MAX_DAY_NUM)
return (from < DATE_LUT_MAX_DAY_NUM)
? from
: time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
}
@ -394,7 +394,7 @@ struct ToDateTransform32Or64Signed
/// The function should be monotonic (better for query optimizations), so we saturate instead of overflow.
if (from < 0)
return 0;
return (from <= DATE_LUT_MAX_DAY_NUM)
return (from < DATE_LUT_MAX_DAY_NUM)
? static_cast<ToType>(from)
: time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF)));
}

View File

@ -56,10 +56,9 @@ public:
return true;
}
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override
{
return {1};
}
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
bool useDefaultImplementationForNulls() const override { return false; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

View File

@ -7,9 +7,7 @@
#include <IO/ReadBufferFromS3.h>
#include <IO/S3/getObjectInfo.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <IO/S3/Requests.h>
#include <Common/Stopwatch.h>
#include <Common/Throttler.h>
@ -44,7 +42,7 @@ namespace ErrorCodes
ReadBufferFromS3::ReadBufferFromS3(
std::shared_ptr<const Aws::S3::S3Client> client_ptr_,
std::shared_ptr<const S3::Client> client_ptr_,
const String & bucket_,
const String & key_,
const String & version_id_,
@ -281,7 +279,7 @@ SeekableReadBuffer::Range ReadBufferFromS3::getRemainingReadRange() const
std::unique_ptr<ReadBuffer> ReadBufferFromS3::initialize()
{
Aws::S3::Model::GetObjectRequest req;
S3::GetObjectRequest req;
req.SetBucket(bucket);
req.SetKey(key);
if (!version_id.empty())

View File

@ -19,7 +19,7 @@
namespace Aws::S3
{
class S3Client;
class Client;
}
namespace DB
@ -30,7 +30,7 @@ namespace DB
class ReadBufferFromS3 : public ReadBufferFromFileBase
{
private:
std::shared_ptr<const Aws::S3::S3Client> client_ptr;
std::shared_ptr<const S3::Client> client_ptr;
String bucket;
String key;
String version_id;
@ -49,7 +49,7 @@ private:
public:
ReadBufferFromS3(
std::shared_ptr<const Aws::S3::S3Client> client_ptr_,
std::shared_ptr<const S3::Client> client_ptr_,
const String & bucket_,
const String & key_,
const String & version_id_,
@ -95,7 +95,7 @@ class ReadBufferS3Factory : public ParallelReadBuffer::ReadBufferFactory, public
{
public:
explicit ReadBufferS3Factory(
std::shared_ptr<const Aws::S3::S3Client> client_ptr_,
std::shared_ptr<const S3::Client> client_ptr_,
const String & bucket_,
const String & key_,
const String & version_id_,
@ -126,7 +126,7 @@ public:
String getFileName() const override { return bucket + "/" + key; }
private:
std::shared_ptr<const Aws::S3::S3Client> client_ptr;
std::shared_ptr<const S3::Client> client_ptr;
const String bucket;
const String key;
const String version_id;

400
src/IO/S3/Client.cpp Normal file
View File

@ -0,0 +1,400 @@
#include <IO/S3/Client.h>
#if USE_AWS_S3
#include <aws/core/client/DefaultRetryStrategy.h>
#include <aws/s3/model/HeadBucketRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <aws/core/client/AWSErrorMarshaller.h>
#include <aws/core/endpoint/EndpointParameter.h>
#include <IO/S3Common.h>
#include <IO/S3/Requests.h>
#include <Common/assert_cast.h>
#include <Common/logger_useful.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
namespace S3
{
Client::RetryStrategy::RetryStrategy(std::shared_ptr<Aws::Client::RetryStrategy> wrapped_strategy_)
: wrapped_strategy(std::move(wrapped_strategy_))
{
if (!wrapped_strategy)
wrapped_strategy = Aws::Client::InitRetryStrategy();
}
/// NOLINTNEXTLINE(google-runtime-int)
bool Client::RetryStrategy::ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const
{
if (error.GetResponseCode() == Aws::Http::HttpResponseCode::MOVED_PERMANENTLY)
return false;
return wrapped_strategy->ShouldRetry(error, attemptedRetries);
}
/// NOLINTNEXTLINE(google-runtime-int)
long Client::RetryStrategy::CalculateDelayBeforeNextRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const
{
return wrapped_strategy->CalculateDelayBeforeNextRetry(error, attemptedRetries);
}
/// NOLINTNEXTLINE(google-runtime-int)
long Client::RetryStrategy::GetMaxAttempts() const
{
return wrapped_strategy->GetMaxAttempts();
}
void Client::RetryStrategy::GetSendToken()
{
return wrapped_strategy->GetSendToken();
}
bool Client::RetryStrategy::HasSendToken()
{
return wrapped_strategy->HasSendToken();
}
void Client::RetryStrategy::RequestBookkeeping(const Aws::Client::HttpResponseOutcome& httpResponseOutcome)
{
return wrapped_strategy->RequestBookkeeping(httpResponseOutcome);
}
void Client::RetryStrategy::RequestBookkeeping(const Aws::Client::HttpResponseOutcome& httpResponseOutcome, const Aws::Client::AWSError<Aws::Client::CoreErrors>& lastError)
{
return wrapped_strategy->RequestBookkeeping(httpResponseOutcome, lastError);
}
bool Client::checkIfWrongRegionDefined(const std::string & bucket, const Aws::S3::S3Error & error, std::string & region) const
{
if (detect_region)
return false;
if (error.GetResponseCode() == Aws::Http::HttpResponseCode::BAD_REQUEST && error.GetExceptionName() == "AuthorizationHeaderMalformed")
{
region = GetErrorMarshaller()->ExtractRegion(error);
if (region.empty())
region = getRegionForBucket(bucket, /*force_detect*/ true);
assert(!explicit_region.empty());
if (region == explicit_region)
return false;
insertRegionOverride(bucket, region);
return true;
}
return false;
}
void Client::insertRegionOverride(const std::string & bucket, const std::string & region) const
{
std::lock_guard lock(cache->region_cache_mutex);
auto [it, inserted] = cache->region_for_bucket_cache.emplace(bucket, region);
if (inserted)
LOG_INFO(log, "Detected different region ('{}') for bucket {} than the one defined ('{}')", region, bucket, explicit_region);
}
Model::HeadObjectOutcome Client::HeadObject(const HeadObjectRequest & request) const
{
const auto & bucket = request.GetBucket();
if (auto region = getRegionForBucket(bucket); !region.empty())
{
if (!detect_region)
LOG_INFO(log, "Using region override {} for bucket {}", region, bucket);
request.overrideRegion(std::move(region));
}
if (auto uri = getURIForBucket(bucket); uri.has_value())
request.overrideURI(std::move(*uri));
auto result = Aws::S3::S3Client::HeadObject(request);
if (result.IsSuccess())
return result;
const auto & error = result.GetError();
std::string new_region;
if (checkIfWrongRegionDefined(bucket, error, new_region))
{
request.overrideRegion(new_region);
return HeadObject(request);
}
if (error.GetResponseCode() != Aws::Http::HttpResponseCode::MOVED_PERMANENTLY)
return result;
// maybe we can detect the correct region
if (!detect_region)
{
if (auto region = GetErrorMarshaller()->ExtractRegion(error); !region.empty() && region != explicit_region)
{
request.overrideRegion(region);
insertRegionOverride(bucket, region);
}
}
auto bucket_uri = getURIForBucket(bucket);
if (!bucket_uri)
{
if (auto maybe_error = updateURIForBucketForHead(bucket); maybe_error.has_value())
return *maybe_error;
if (auto region = getRegionForBucket(bucket); !region.empty())
{
if (!detect_region)
LOG_INFO(log, "Using region override {} for bucket {}", region, bucket);
request.overrideRegion(std::move(region));
}
bucket_uri = getURIForBucket(bucket);
if (!bucket_uri)
{
LOG_ERROR(log, "Missing resolved URI for bucket {}, maybe the cache was cleaned", bucket);
return result;
}
}
const auto & current_uri_override = request.getURIOverride();
/// we already tried with this URI
if (current_uri_override && current_uri_override->uri == bucket_uri->uri)
{
LOG_INFO(log, "Getting redirected to the same invalid location {}", bucket_uri->uri.toString());
return result;
}
request.overrideURI(std::move(*bucket_uri));
return Aws::S3::S3Client::HeadObject(request);
}
Model::ListObjectsV2Outcome Client::ListObjectsV2(const ListObjectsV2Request & request) const
{
return doRequest(request, [this](const Model::ListObjectsV2Request & req) { return Aws::S3::S3Client::ListObjectsV2(req); });
}
Model::ListObjectsOutcome Client::ListObjects(const ListObjectsRequest & request) const
{
return doRequest(request, [this](const Model::ListObjectsRequest & req) { return Aws::S3::S3Client::ListObjects(req); });
}
Model::GetObjectOutcome Client::GetObject(const GetObjectRequest & request) const
{
return doRequest(request, [this](const Model::GetObjectRequest & req) { return Aws::S3::S3Client::GetObject(req); });
}
Model::AbortMultipartUploadOutcome Client::AbortMultipartUpload(const AbortMultipartUploadRequest & request) const
{
return doRequest(
request, [this](const Model::AbortMultipartUploadRequest & req) { return Aws::S3::S3Client::AbortMultipartUpload(req); });
}
Model::CreateMultipartUploadOutcome Client::CreateMultipartUpload(const CreateMultipartUploadRequest & request) const
{
return doRequest(
request, [this](const Model::CreateMultipartUploadRequest & req) { return Aws::S3::S3Client::CreateMultipartUpload(req); });
}
Model::CompleteMultipartUploadOutcome Client::CompleteMultipartUpload(const CompleteMultipartUploadRequest & request) const
{
return doRequest(
request, [this](const Model::CompleteMultipartUploadRequest & req) { return Aws::S3::S3Client::CompleteMultipartUpload(req); });
}
Model::CopyObjectOutcome Client::CopyObject(const CopyObjectRequest & request) const
{
return doRequest(request, [this](const Model::CopyObjectRequest & req) { return Aws::S3::S3Client::CopyObject(req); });
}
Model::PutObjectOutcome Client::PutObject(const PutObjectRequest & request) const
{
return doRequest(request, [this](const Model::PutObjectRequest & req) { return Aws::S3::S3Client::PutObject(req); });
}
Model::UploadPartOutcome Client::UploadPart(const UploadPartRequest & request) const
{
return doRequest(request, [this](const Model::UploadPartRequest & req) { return Aws::S3::S3Client::UploadPart(req); });
}
Model::UploadPartCopyOutcome Client::UploadPartCopy(const UploadPartCopyRequest & request) const
{
return doRequest(request, [this](const Model::UploadPartCopyRequest & req) { return Aws::S3::S3Client::UploadPartCopy(req); });
}
Model::DeleteObjectOutcome Client::DeleteObject(const DeleteObjectRequest & request) const
{
return doRequest(request, [this](const Model::DeleteObjectRequest & req) { return Aws::S3::S3Client::DeleteObject(req); });
}
Model::DeleteObjectsOutcome Client::DeleteObjects(const DeleteObjectsRequest & request) const
{
return doRequest(request, [this](const Model::DeleteObjectsRequest & req) { return Aws::S3::S3Client::DeleteObjects(req); });
}
std::string Client::getRegionForBucket(const std::string & bucket, bool force_detect) const
{
std::lock_guard lock(cache->region_cache_mutex);
if (auto it = cache->region_for_bucket_cache.find(bucket); it != cache->region_for_bucket_cache.end())
return it->second;
if (!force_detect && !detect_region)
return "";
LOG_INFO(log, "Resolving region for bucket {}", bucket);
Aws::S3::Model::HeadBucketRequest req;
req.SetBucket(bucket);
std::string region;
auto outcome = HeadBucket(req);
if (outcome.IsSuccess())
{
const auto & result = outcome.GetResult();
region = result.GetRegion();
}
else
{
static const std::string region_header = "x-amz-bucket-region";
const auto & headers = outcome.GetError().GetResponseHeaders();
if (auto it = headers.find(region_header); it != headers.end())
region = it->second;
}
if (region.empty())
{
LOG_INFO(log, "Failed resolving region for bucket {}", bucket);
return "";
}
LOG_INFO(log, "Found region {} for bucket {}", region, bucket);
auto [it, _] = cache->region_for_bucket_cache.emplace(bucket, std::move(region));
return it->second;
}
std::optional<S3::URI> Client::getURIFromError(const Aws::S3::S3Error & error) const
{
auto endpoint = GetErrorMarshaller()->ExtractEndpoint(error);
if (endpoint.empty())
return std::nullopt;
auto & s3_client = const_cast<Client &>(*this);
const auto * endpoint_provider = dynamic_cast<Aws::S3::Endpoint::S3DefaultEpProviderBase *>(s3_client.accessEndpointProvider().get());
auto resolved_endpoint = endpoint_provider->ResolveEndpoint({});
if (!resolved_endpoint.IsSuccess())
return std::nullopt;
auto uri = resolved_endpoint.GetResult().GetURI();
uri.SetAuthority(endpoint);
return S3::URI(uri.GetURIString());
}
// Do a list request because head requests don't have a body in the response
std::optional<Aws::S3::S3Error> Client::updateURIForBucketForHead(const std::string & bucket) const
{
ListObjectsV2Request req;
req.SetBucket(bucket);
req.SetMaxKeys(1);
auto result = ListObjectsV2(req);
if (result.IsSuccess())
return std::nullopt;
return result.GetError();
}
std::optional<S3::URI> Client::getURIForBucket(const std::string & bucket) const
{
std::lock_guard lock(cache->uri_cache_mutex);
if (auto it = cache->uri_for_bucket_cache.find(bucket); it != cache->uri_for_bucket_cache.end())
return it->second;
return std::nullopt;
}
void Client::updateURIForBucket(const std::string & bucket, S3::URI new_uri) const
{
std::lock_guard lock(cache->uri_cache_mutex);
if (auto it = cache->uri_for_bucket_cache.find(bucket); it != cache->uri_for_bucket_cache.end())
{
if (it->second.uri == new_uri.uri)
return;
LOG_INFO(log, "Updating URI for bucket {} to {}", bucket, new_uri.uri.toString());
it->second = std::move(new_uri);
return;
}
LOG_INFO(log, "Updating URI for bucket {} to {}", bucket, new_uri.uri.toString());
cache->uri_for_bucket_cache.emplace(bucket, std::move(new_uri));
}
void ClientCache::clearCache()
{
{
std::lock_guard lock(region_cache_mutex);
region_for_bucket_cache.clear();
}
{
std::lock_guard lock(uri_cache_mutex);
uri_for_bucket_cache.clear();
}
}
void ClientCacheRegistry::registerClient(const std::shared_ptr<ClientCache> & client_cache)
{
std::lock_guard lock(clients_mutex);
auto [it, inserted] = client_caches.emplace(client_cache.get(), client_cache);
if (!inserted)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Same S3 client registered twice");
}
void ClientCacheRegistry::unregisterClient(ClientCache * client)
{
std::lock_guard lock(clients_mutex);
auto erased = client_caches.erase(client);
if (erased == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't unregister S3 client, either it was already unregistered or not registered at all");
}
void ClientCacheRegistry::clearCacheForAll()
{
std::lock_guard lock(clients_mutex);
for (auto it = client_caches.begin(); it != client_caches.end();)
{
if (auto locked_client = it->second.lock(); locked_client)
{
locked_client->clearCache();
++it;
}
else
{
LOG_INFO(&Poco::Logger::get("ClientCacheRegistry"), "Deleting leftover S3 client cache");
it = client_caches.erase(it);
}
}
}
}
}
#endif
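
The ClientCacheRegistry defined above makes it possible to invalidate every per-client bucket cache at once; a minimal sketch of such a call (the helper name is hypothetical):

#include <IO/S3/Client.h>

// Hypothetical helper: drop all cached bucket->region and bucket->URI mappings,
// e.g. after an endpoint configuration change.
void dropAllS3ClientCaches()
{
    // Walks every registered ClientCache, clears it, and prunes entries whose
    // owning client has already been destroyed.
    DB::S3::ClientCacheRegistry::instance().clearCacheForAll();
}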

309
src/IO/S3/Client.h Normal file
View File

@ -0,0 +1,309 @@
#pragma once
#include "config.h"
#if USE_AWS_S3
#include <Common/logger_useful.h>
#include <Common/assert_cast.h>
#include <base/scope_guard.h>
#include <IO/S3/URI.h>
#include <IO/S3/Requests.h>
#include <aws/core/client/DefaultRetryStrategy.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/S3ServiceClientModel.h>
#include <aws/core/client/AWSErrorMarshaller.h>
#include <aws/core/client/RetryStrategy.h>
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TOO_MANY_REDIRECTS;
}
namespace S3
{
namespace Model = Aws::S3::Model;
struct ClientCache
{
ClientCache() = default;
ClientCache(const ClientCache & other)
: region_for_bucket_cache(other.region_for_bucket_cache)
, uri_for_bucket_cache(other.uri_for_bucket_cache)
{}
ClientCache(ClientCache && other) = delete;
ClientCache & operator=(const ClientCache &) = delete;
ClientCache & operator=(ClientCache &&) = delete;
void clearCache();
std::mutex region_cache_mutex;
std::unordered_map<std::string, std::string> region_for_bucket_cache;
std::mutex uri_cache_mutex;
std::unordered_map<std::string, URI> uri_for_bucket_cache;
};
class ClientCacheRegistry
{
public:
static ClientCacheRegistry & instance()
{
static ClientCacheRegistry registry;
return registry;
}
void registerClient(const std::shared_ptr<ClientCache> & client_cache);
void unregisterClient(ClientCache * client);
void clearCacheForAll();
private:
ClientCacheRegistry() = default;
std::mutex clients_mutex;
std::unordered_map<ClientCache *, std::weak_ptr<ClientCache>> client_caches;
};
/// A client that extends the client from the AWS SDK:
/// - injects the region and URI into requests so they are rerouted to the correct destination if needed
/// - automatically detects the endpoint and region for each bucket and caches them
///
/// For this client to work correctly, both Client::RetryStrategy and the request types defined in <IO/S3/Requests.h> must be used.
class Client : public Aws::S3::S3Client
{
public:
template <typename... Args>
static std::unique_ptr<Client> create(Args &&... args)
{
(verifyArgument(args), ...);
return std::unique_ptr<Client>(new Client(std::forward<Args>(args)...));
}
Client & operator=(const Client &) = delete;
Client(Client && other) = delete;
Client & operator=(Client &&) = delete;
~Client() override
{
try
{
ClientCacheRegistry::instance().unregisterClient(cache.get());
}
catch (...)
{
tryLogCurrentException(log);
throw;
}
}
/// Decorator for RetryStrategy needed for this client to work correctly
class RetryStrategy : public Aws::Client::RetryStrategy
{
public:
explicit RetryStrategy(std::shared_ptr<Aws::Client::RetryStrategy> wrapped_strategy_);
/// NOLINTNEXTLINE(google-runtime-int)
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;
/// NOLINTNEXTLINE(google-runtime-int)
long CalculateDelayBeforeNextRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override;
/// NOLINTNEXTLINE(google-runtime-int)
long GetMaxAttempts() const override;
void GetSendToken() override;
bool HasSendToken() override;
void RequestBookkeeping(const Aws::Client::HttpResponseOutcome& httpResponseOutcome) override;
void RequestBookkeeping(const Aws::Client::HttpResponseOutcome& httpResponseOutcome, const Aws::Client::AWSError<Aws::Client::CoreErrors>& lastError) override;
private:
std::shared_ptr<Aws::Client::RetryStrategy> wrapped_strategy;
};
Model::HeadObjectOutcome HeadObject(const HeadObjectRequest & request) const;
Model::ListObjectsV2Outcome ListObjectsV2(const ListObjectsV2Request & request) const;
Model::ListObjectsOutcome ListObjects(const ListObjectsRequest & request) const;
Model::GetObjectOutcome GetObject(const GetObjectRequest & request) const;
Model::AbortMultipartUploadOutcome AbortMultipartUpload(const AbortMultipartUploadRequest & request) const;
Model::CreateMultipartUploadOutcome CreateMultipartUpload(const CreateMultipartUploadRequest & request) const;
Model::CompleteMultipartUploadOutcome CompleteMultipartUpload(const CompleteMultipartUploadRequest & request) const;
Model::UploadPartOutcome UploadPart(const UploadPartRequest & request) const;
Model::UploadPartCopyOutcome UploadPartCopy(const UploadPartCopyRequest & request) const;
Model::CopyObjectOutcome CopyObject(const CopyObjectRequest & request) const;
Model::PutObjectOutcome PutObject(const PutObjectRequest & request) const;
Model::DeleteObjectOutcome DeleteObject(const DeleteObjectRequest & request) const;
Model::DeleteObjectsOutcome DeleteObjects(const DeleteObjectsRequest & request) const;
private:
template <typename... Args>
explicit Client(size_t max_redirects_, Args &&... args)
: Aws::S3::S3Client(std::forward<Args>(args)...)
, max_redirects(max_redirects_)
, log(&Poco::Logger::get("S3Client"))
{
auto * endpoint_provider = dynamic_cast<Aws::S3::Endpoint::S3DefaultEpProviderBase *>(accessEndpointProvider().get());
endpoint_provider->GetBuiltInParameters().GetParameter("Region").GetString(explicit_region);
std::string endpoint;
endpoint_provider->GetBuiltInParameters().GetParameter("Endpoint").GetString(endpoint);
detect_region = explicit_region == Aws::Region::AWS_GLOBAL && endpoint.find(".amazonaws.com") != std::string::npos;
cache = std::make_shared<ClientCache>();
ClientCacheRegistry::instance().registerClient(cache);
}
Client(const Client & other)
: Aws::S3::S3Client(other)
, explicit_region(other.explicit_region)
, detect_region(other.detect_region)
, max_redirects(other.max_redirects)
, log(&Poco::Logger::get("S3Client"))
{
cache = std::make_shared<ClientCache>(*other.cache);
ClientCacheRegistry::instance().registerClient(cache);
}
/// Make regular functions private
using Aws::S3::S3Client::HeadObject;
using Aws::S3::S3Client::ListObjectsV2;
using Aws::S3::S3Client::ListObjects;
using Aws::S3::S3Client::GetObject;
using Aws::S3::S3Client::AbortMultipartUpload;
using Aws::S3::S3Client::CreateMultipartUpload;
using Aws::S3::S3Client::CompleteMultipartUpload;
using Aws::S3::S3Client::UploadPart;
using Aws::S3::S3Client::UploadPartCopy;
using Aws::S3::S3Client::CopyObject;
using Aws::S3::S3Client::PutObject;
using Aws::S3::S3Client::DeleteObject;
using Aws::S3::S3Client::DeleteObjects;
template <typename RequestType, typename RequestFn>
std::invoke_result_t<RequestFn, RequestType>
doRequest(const RequestType & request, RequestFn request_fn) const
{
const auto & bucket = request.GetBucket();
if (auto region = getRegionForBucket(bucket); !region.empty())
{
if (!detect_region)
LOG_INFO(log, "Using region override {} for bucket {}", region, bucket);
request.overrideRegion(std::move(region));
}
if (auto uri = getURIForBucket(bucket); uri.has_value())
request.overrideURI(std::move(*uri));
bool found_new_endpoint = false;
// if we found the correct endpoint after 301 responses, update the cache for future requests
SCOPE_EXIT(
if (found_new_endpoint)
{
auto uri_override = request.getURIOverride();
assert(uri_override.has_value());
updateURIForBucket(bucket, std::move(*uri_override));
}
);
for (size_t attempt = 0; attempt <= max_redirects; ++attempt)
{
auto result = request_fn(request);
if (result.IsSuccess())
return result;
const auto & error = result.GetError();
std::string new_region;
if (checkIfWrongRegionDefined(bucket, error, new_region))
{
request.overrideRegion(new_region);
continue;
}
if (error.GetResponseCode() != Aws::Http::HttpResponseCode::MOVED_PERMANENTLY)
return result;
// maybe we can detect the correct region
if (!detect_region)
{
if (auto region = GetErrorMarshaller()->ExtractRegion(error); !region.empty() && region != explicit_region)
{
request.overrideRegion(region);
insertRegionOverride(bucket, region);
}
}
// we possibly got a new location, so try with that one
auto new_uri = getURIFromError(error);
if (!new_uri)
return result;
const auto & current_uri_override = request.getURIOverride();
/// we already tried with this URI
if (current_uri_override && current_uri_override->uri == new_uri->uri)
{
LOG_INFO(log, "Getting redirected to the same invalid location {}", new_uri->uri.toString());
return result;
}
found_new_endpoint = true;
request.overrideURI(*new_uri);
}
throw Exception(ErrorCodes::TOO_MANY_REDIRECTS, "Too many redirects");
}
void updateURIForBucket(const std::string & bucket, S3::URI new_uri) const;
std::optional<S3::URI> getURIFromError(const Aws::S3::S3Error & error) const;
std::optional<Aws::S3::S3Error> updateURIForBucketForHead(const std::string & bucket) const;
std::string getRegionForBucket(const std::string & bucket, bool force_detect = false) const;
std::optional<S3::URI> getURIForBucket(const std::string & bucket) const;
bool checkIfWrongRegionDefined(const std::string & bucket, const Aws::S3::S3Error & error, std::string & region) const;
void insertRegionOverride(const std::string & bucket, const std::string & region) const;
template <typename T>
static void verifyArgument(const T & /*arg*/)
{}
template <std::derived_from<Aws::Client::ClientConfiguration> T>
static void verifyArgument(const T & client_config)
{
if (!client_config.retryStrategy)
throw Exception(ErrorCodes::LOGICAL_ERROR, "The S3 client can only be used with Client::RetryStrategy, define it in the client configuration");
assert_cast<const RetryStrategy &>(*client_config.retryStrategy);
}
std::string explicit_region;
mutable bool detect_region = true;
mutable std::shared_ptr<ClientCache> cache;
const size_t max_redirects;
Poco::Logger * log;
};
}
}
#endif
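
A hedged usage sketch of the wrapper: the member functions below come from the header above, but the way the client instance is obtained and the bucket/key values are assumptions. The point is that callers use the extended request types from <IO/S3/Requests.h>, so the client can transparently inject region/URI overrides and follow 301 redirects up to max_redirects:

#include <IO/S3/Client.h>
#include <IO/S3/Requests.h>
#include <memory>
#include <stdexcept>
#include <string>

void readObject(const std::shared_ptr<const DB::S3::Client> & client,
                const std::string & bucket, const std::string & key)
{
    DB::S3::GetObjectRequest request;   // ExtendedRequest<Model::GetObjectRequest>
    request.SetBucket(bucket);
    request.SetKey(key);

    // On MOVED_PERMANENTLY the client retries with the region/endpoint extracted
    // from the error and caches the resolved URI for the bucket.
    auto outcome = client->GetObject(request);
    if (!outcome.IsSuccess())
        throw std::runtime_error(outcome.GetError().GetMessage().c_str());
}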

87
src/IO/S3/Requests.h Normal file
View File

@ -0,0 +1,87 @@
#pragma once
#include "config.h"
#if USE_AWS_S3
#include <IO/S3/URI.h>
#include <aws/core/endpoint/EndpointParameter.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <aws/s3/model/ListObjectsRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CopyObjectRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <aws/s3/model/UploadPartCopyRequest.h>
#include <aws/s3/model/DeleteObjectRequest.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
namespace DB::S3
{
namespace Model = Aws::S3::Model;
template <typename BaseRequest>
class ExtendedRequest : public BaseRequest
{
public:
Aws::Endpoint::EndpointParameters GetEndpointContextParams() const override
{
auto params = BaseRequest::GetEndpointContextParams();
if (!region_override.empty())
params.emplace_back("Region", region_override);
if (uri_override.has_value())
{
static const Aws::String AWS_S3_FORCE_PATH_STYLE = "ForcePathStyle";
params.emplace_back(AWS_S3_FORCE_PATH_STYLE, !uri_override->is_virtual_hosted_style);
params.emplace_back("Endpoint", uri_override->endpoint);
}
return params;
}
void overrideRegion(std::string region) const
{
region_override = std::move(region);
}
void overrideURI(S3::URI uri) const
{
uri_override = std::move(uri);
}
const auto & getURIOverride() const
{
return uri_override;
}
protected:
mutable std::string region_override;
mutable std::optional<S3::URI> uri_override;
};
using HeadObjectRequest = ExtendedRequest<Model::HeadObjectRequest>;
using ListObjectsV2Request = ExtendedRequest<Model::ListObjectsV2Request>;
using ListObjectsRequest = ExtendedRequest<Model::ListObjectsRequest>;
using GetObjectRequest = ExtendedRequest<Model::GetObjectRequest>;
using CreateMultipartUploadRequest = ExtendedRequest<Model::CreateMultipartUploadRequest>;
using CompleteMultipartUploadRequest = ExtendedRequest<Model::CompleteMultipartUploadRequest>;
using AbortMultipartUploadRequest = ExtendedRequest<Model::AbortMultipartUploadRequest>;
using UploadPartRequest = ExtendedRequest<Model::UploadPartRequest>;
using UploadPartCopyRequest = ExtendedRequest<Model::UploadPartCopyRequest>;
using PutObjectRequest = ExtendedRequest<Model::PutObjectRequest>;
using CopyObjectRequest = ExtendedRequest<Model::CopyObjectRequest>;
using DeleteObjectRequest = ExtendedRequest<Model::DeleteObjectRequest>;
using DeleteObjectsRequest = ExtendedRequest<Model::DeleteObjectsRequest>;
}
#endif
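A brief sketch of how these extended requests are meant to be used; the bucket, key and URI values below are placeholders, not taken from the diff. The override methods are const with mutable storage, which is what lets Client::doRequest() adjust a request it only receives by const reference:

void exampleOverrides()
{
    DB::S3::HeadObjectRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("some/key");
    request.overrideRegion("eu-west-1");  /// e.g. a per-bucket region discovered by the client
    request.overrideURI(DB::S3::URI("https://s3.eu-west-1.amazonaws.com/my-bucket/some/key"));
    /// GetEndpointContextParams() now also reports "Region", "ForcePathStyle" and "Endpoint",
    /// which the AWS SDK endpoint provider takes into account when resolving the request URL.
    auto params = request.GetEndpointContextParams();
}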

src/IO/S3/URI.cpp Normal file
View File

@ -0,0 +1,119 @@
#include <IO/S3/URI.h>
#if USE_AWS_S3
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <boost/algorithm/string/case_conv.hpp>
#include <re2/re2.h>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
namespace S3
{
URI::URI(const std::string & uri_)
{
/// Case when bucket name represented in domain name of S3 URL.
/// E.g. (https://bucket-name.s3.Region.amazonaws.com/key)
/// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access
static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3|cos|obs|oss)([.\-][a-z0-9\-.:]+))");
/// Case when bucket name and key represented in path of S3 URL.
/// E.g. (https://s3.Region.amazonaws.com/bucket-name/key)
/// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access
static const RE2 path_style_pattern("^/([^/]*)/(.*)");
static constexpr auto S3 = "S3";
static constexpr auto COSN = "COSN";
static constexpr auto COS = "COS";
static constexpr auto OBS = "OBS";
static constexpr auto OSS = "OSS";
uri = Poco::URI(uri_);
storage_name = S3;
if (uri.getHost().empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI.");
/// Extract object version ID from query string.
bool has_version_id = false;
for (const auto & [query_key, query_value] : uri.getQueryParameters())
if (query_key == "versionId")
{
version_id = query_value;
has_version_id = true;
}
/// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the HTTP parameters,
/// '?' cannot be used as a wildcard, otherwise it would be ambiguous.
/// If there is no "versionId" in the HTTP parameters, '?' can be used as a wildcard.
/// It is necessary to encode '?' so that it is not dropped while parsing the path.
if (!has_version_id && uri_.find('?') != String::npos)
{
String uri_with_question_mark_encode;
Poco::URI::encode(uri_, "?", uri_with_question_mark_encode);
uri = Poco::URI(uri_with_question_mark_encode);
}
String name;
String endpoint_authority_from_uri;
if (re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket, &name, &endpoint_authority_from_uri))
{
is_virtual_hosted_style = true;
endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri;
validateBucket(bucket, uri);
if (!uri.getPath().empty())
{
/// Remove leading '/' from path to extract key.
key = uri.getPath().substr(1);
}
boost::to_upper(name);
if (name != S3 && name != COS && name != OBS && name != OSS)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Object storage system name is unrecognized in virtual hosted style S3 URI: {}",
quoteString(name));
if (name == S3)
storage_name = name;
else if (name == OBS)
storage_name = OBS;
else if (name == OSS)
storage_name = OSS;
else
storage_name = COSN;
}
else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key))
{
is_virtual_hosted_style = false;
endpoint = uri.getScheme() + "://" + uri.getAuthority();
validateBucket(bucket, uri);
}
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI.");
}
void URI::validateBucket(const String & bucket, const Poco::URI & uri)
{
/// S3 specification requires at least 3 and at most 63 characters in bucket name.
/// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
if (bucket.length() < 3 || bucket.length() > 63)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket name length is out of bounds in virtual hosted style S3 URI: {}{}",
quoteString(bucket), !uri.empty() ? " (" + uri.toString() + ")" : "");
}
}
}
#endif
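For illustration, this is how the two supported layouts are parsed by the constructor above; the hostnames are examples, not taken from the diff:

void exampleParsing()
{
    /// Virtual-hosted style: the bucket comes from the authority, the service name is stripped from the endpoint.
    DB::S3::URI vhost("https://mybucket.s3.eu-west-1.amazonaws.com/some/key");
    /// vhost.bucket == "mybucket", vhost.key == "some/key",
    /// vhost.endpoint == "https://s3.eu-west-1.amazonaws.com", vhost.is_virtual_hosted_style == true

    /// Path style: the bucket and key both come from the path.
    DB::S3::URI path("https://s3.eu-west-1.amazonaws.com/mybucket/some/key");
    /// path.bucket == "mybucket", path.key == "some/key",
    /// path.endpoint == "https://s3.eu-west-1.amazonaws.com", path.is_virtual_hosted_style == false
}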

src/IO/S3/URI.h Normal file
View File

@ -0,0 +1,40 @@
#pragma once
#include <string>
#include "config.h"
#if USE_AWS_S3
#include <Poco/URI.h>
namespace DB::S3
{
/**
* Represents S3 URI.
*
* The following patterns are allowed:
* s3://bucket/key
* http(s)://endpoint/bucket/key
*/
struct URI
{
Poco::URI uri;
// Custom endpoint if URI scheme is not S3.
std::string endpoint;
std::string bucket;
std::string key;
std::string version_id;
std::string storage_name;
bool is_virtual_hosted_style;
explicit URI(const std::string & uri_);
static void validateBucket(const std::string & bucket, const Poco::URI & uri);
};
}
#endif

View File

@ -9,15 +9,7 @@
#include <IO/SeekableReadBuffer.h>
#include <IO/StdStreamFromReadBuffer.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CopyObjectRequest.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/UploadPartCopyRequest.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <IO/S3/Requests.h>
namespace ProfileEvents
{
@ -54,7 +46,7 @@ namespace
{
public:
UploadHelper(
const std::shared_ptr<const Aws::S3::S3Client> & client_ptr_,
const std::shared_ptr<const S3::Client> & client_ptr_,
const String & dest_bucket_,
const String & dest_key_,
const S3Settings::RequestSettings & request_settings_,
@ -77,7 +69,7 @@ namespace
virtual ~UploadHelper() = default;
protected:
std::shared_ptr<const Aws::S3::S3Client> client_ptr;
std::shared_ptr<const S3::Client> client_ptr;
const String & dest_bucket;
const String & dest_key;
const S3Settings::RequestSettings & request_settings;
@ -108,7 +100,7 @@ namespace
void createMultipartUpload()
{
Aws::S3::Model::CreateMultipartUploadRequest request;
S3::CreateMultipartUploadRequest request;
request.SetBucket(dest_bucket);
request.SetKey(dest_key);
@ -147,7 +139,7 @@ namespace
if (part_tags.empty())
throw Exception(ErrorCodes::S3_ERROR, "Failed to complete multipart upload. No parts have been uploaded");
Aws::S3::Model::CompleteMultipartUploadRequest request;
S3::CompleteMultipartUploadRequest request;
request.SetBucket(dest_bucket);
request.SetKey(dest_key);
request.SetUploadId(multipart_upload_id);
@ -194,7 +186,7 @@ namespace
void abortMultipartUpload()
{
LOG_TRACE(log, "Aborting multipart upload. Bucket: {}, Key: {}, Upload_id: {}", dest_bucket, dest_key, multipart_upload_id);
Aws::S3::Model::AbortMultipartUploadRequest abort_request;
S3::AbortMultipartUploadRequest abort_request;
abort_request.SetBucket(dest_bucket);
abort_request.SetKey(dest_key);
abort_request.SetUploadId(multipart_upload_id);
@ -404,7 +396,7 @@ namespace
const std::function<std::unique_ptr<SeekableReadBuffer>()> & create_read_buffer_,
size_t offset_,
size_t size_,
const std::shared_ptr<const Aws::S3::S3Client> & client_ptr_,
const std::shared_ptr<const S3::Client> & client_ptr_,
const String & dest_bucket_,
const String & dest_key_,
const S3Settings::RequestSettings & request_settings_,
@ -436,12 +428,12 @@ namespace
void performSinglepartUpload()
{
Aws::S3::Model::PutObjectRequest request;
S3::PutObjectRequest request;
fillPutRequest(request);
processPutRequest(request);
}
void fillPutRequest(Aws::S3::Model::PutObjectRequest & request)
void fillPutRequest(S3::PutObjectRequest & request)
{
auto read_buffer = std::make_unique<LimitSeekableReadBuffer>(create_read_buffer(), offset, size);
@ -461,7 +453,7 @@ namespace
request.SetContentType("binary/octet-stream");
}
void processPutRequest(const Aws::S3::Model::PutObjectRequest & request)
void processPutRequest(const S3::PutObjectRequest & request)
{
size_t max_retries = std::max(request_settings.max_unexpected_write_error_retries, 1UL);
for (size_t retries = 1;; ++retries)
@ -526,7 +518,7 @@ namespace
auto read_buffer = std::make_unique<LimitSeekableReadBuffer>(create_read_buffer(), part_offset, part_size);
/// Setup request.
auto request = std::make_unique<Aws::S3::Model::UploadPartRequest>();
auto request = std::make_unique<S3::UploadPartRequest>();
request->SetBucket(dest_bucket);
request->SetKey(dest_key);
request->SetPartNumber(static_cast<int>(part_number));
@ -542,7 +534,7 @@ namespace
String processUploadPartRequest(Aws::AmazonWebServiceRequest & request) override
{
auto & req = typeid_cast<Aws::S3::Model::UploadPartRequest &>(request);
auto & req = typeid_cast<S3::UploadPartRequest &>(request);
ProfileEvents::increment(ProfileEvents::S3UploadPart);
if (for_disk_s3)
@ -564,7 +556,7 @@ namespace
{
public:
CopyFileHelper(
const std::shared_ptr<const Aws::S3::S3Client> & client_ptr_,
const std::shared_ptr<const S3::Client> & client_ptr_,
const String & src_bucket_,
const String & src_key_,
size_t src_offset_,
@ -602,12 +594,12 @@ namespace
void performSingleOperationCopy()
{
Aws::S3::Model::CopyObjectRequest request;
S3::CopyObjectRequest request;
fillCopyRequest(request);
processCopyRequest(request);
}
void fillCopyRequest(Aws::S3::Model::CopyObjectRequest & request)
void fillCopyRequest(S3::CopyObjectRequest & request)
{
request.SetCopySource(src_bucket + "/" + src_key);
request.SetBucket(dest_bucket);
@ -627,7 +619,7 @@ namespace
request.SetContentType("binary/octet-stream");
}
void processCopyRequest(const Aws::S3::Model::CopyObjectRequest & request)
void processCopyRequest(const S3::CopyObjectRequest & request)
{
size_t max_retries = std::max(request_settings.max_unexpected_write_error_retries, 1UL);
for (size_t retries = 1;; ++retries)
@ -689,7 +681,7 @@ namespace
std::unique_ptr<Aws::AmazonWebServiceRequest> fillUploadPartRequest(size_t part_number, size_t part_offset, size_t part_size) override
{
auto request = std::make_unique<Aws::S3::Model::UploadPartCopyRequest>();
auto request = std::make_unique<S3::UploadPartCopyRequest>();
/// Make a copy request to copy a part.
request->SetCopySource(src_bucket + "/" + src_key);
@ -704,7 +696,7 @@ namespace
String processUploadPartRequest(Aws::AmazonWebServiceRequest & request) override
{
auto & req = typeid_cast<Aws::S3::Model::UploadPartCopyRequest &>(request);
auto & req = typeid_cast<S3::UploadPartCopyRequest &>(request);
ProfileEvents::increment(ProfileEvents::S3UploadPartCopy);
if (for_disk_s3)
@ -727,7 +719,7 @@ void copyDataToS3File(
const std::function<std::unique_ptr<SeekableReadBuffer>()> & create_read_buffer,
size_t offset,
size_t size,
const std::shared_ptr<const Aws::S3::S3Client> & dest_s3_client,
const std::shared_ptr<const S3::Client> & dest_s3_client,
const String & dest_bucket,
const String & dest_key,
const S3Settings::RequestSettings & settings,
@ -741,7 +733,7 @@ void copyDataToS3File(
void copyS3File(
const std::shared_ptr<const Aws::S3::S3Client> & s3_client,
const std::shared_ptr<const S3::Client> & s3_client,
const String & src_bucket,
const String & src_key,
size_t src_offset,

View File

@ -7,7 +7,6 @@
#include <Storages/StorageS3Settings.h>
#include <Interpreters/threadPoolCallbackRunner.h>
#include <base/types.h>
#include <aws/s3/S3Client.h>
#include <functional>
#include <memory>
@ -21,7 +20,7 @@ class SeekableReadBuffer;
/// however copyS3File() is faster and spends less network traffic and memory.
/// The parameters `src_offset` and `src_size` specify a part in the source to copy.
void copyS3File(
const std::shared_ptr<const Aws::S3::S3Client> & s3_client,
const std::shared_ptr<const S3::Client> & s3_client,
const String & src_bucket,
const String & src_key,
size_t src_offset,
@ -42,7 +41,7 @@ void copyDataToS3File(
const std::function<std::unique_ptr<SeekableReadBuffer>()> & create_read_buffer,
size_t offset,
size_t size,
const std::shared_ptr<const Aws::S3::S3Client> & dest_s3_client,
const std::shared_ptr<const S3::Client> & dest_s3_client,
const String & dest_bucket,
const String & dest_key,
const S3Settings::RequestSettings & settings,

View File

@ -1,11 +1,6 @@
#include <IO/S3/getObjectInfo.h>
#if USE_AWS_S3
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectAttributesRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
namespace ErrorCodes
{
@ -30,13 +25,13 @@ namespace DB::S3
namespace
{
Aws::S3::Model::HeadObjectOutcome headObject(
const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool for_disk_s3)
const S3::Client & client, const String & bucket, const String & key, const String & version_id, bool for_disk_s3)
{
ProfileEvents::increment(ProfileEvents::S3HeadObject);
if (for_disk_s3)
ProfileEvents::increment(ProfileEvents::DiskS3HeadObject);
Aws::S3::Model::HeadObjectRequest req;
S3::HeadObjectRequest req;
req.SetBucket(bucket);
req.SetKey(key);
@ -46,93 +41,25 @@ namespace
return client.HeadObject(req);
}
Aws::S3::Model::GetObjectAttributesOutcome getObjectAttributes(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool for_disk_s3)
{
ProfileEvents::increment(ProfileEvents::S3GetObjectAttributes);
if (for_disk_s3)
ProfileEvents::increment(ProfileEvents::DiskS3GetObjectAttributes);
Aws::S3::Model::GetObjectAttributesRequest req;
req.SetBucket(bucket);
req.SetKey(key);
if (!version_id.empty())
req.SetVersionId(version_id);
req.SetObjectAttributes({Aws::S3::Model::ObjectAttributes::ObjectSize});
return client.GetObjectAttributes(req);
}
Aws::S3::Model::GetObjectOutcome getObjectDummy(const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id, bool for_disk_s3)
{
ProfileEvents::increment(ProfileEvents::S3GetObject);
if (for_disk_s3)
ProfileEvents::increment(ProfileEvents::DiskS3GetObject);
Aws::S3::Model::GetObjectRequest req;
req.SetBucket(bucket);
req.SetKey(key);
if (!version_id.empty())
req.SetVersionId(version_id);
/// Only the first byte will be read.
/// We don't need that first byte but the range should be set otherwise the entire object will be read.
req.SetRange("bytes=0-0");
return client.GetObject(req);
}
/// Performs a request to get the size and last modification time of an object.
/// The function performs either HeadObject or GetObjectAttributes request depending on the endpoint.
std::pair<std::optional<ObjectInfo>, Aws::S3::S3Error> tryGetObjectInfo(
const Aws::S3::S3Client & client, const String & bucket, const String & key, const String & version_id,
const S3Settings::RequestSettings & request_settings, bool with_metadata, bool for_disk_s3)
const S3::Client & client, const String & bucket, const String & key, const String & version_id,
const S3Settings::RequestSettings & /*request_settings*/, bool with_metadata, bool for_disk_s3)
{
if (request_settings.allow_head_object_request)
{
auto outcome = headObject(client, bucket, key, version_id, for_disk_s3);
if (!outcome.IsSuccess())
return {std::nullopt, outcome.GetError()};
auto outcome = headObject(client, bucket, key, version_id, for_disk_s3);
if (!outcome.IsSuccess())
return {std::nullopt, outcome.GetError()};
const auto & result = outcome.GetResult();
ObjectInfo object_info;
object_info.size = static_cast<size_t>(result.GetContentLength());
object_info.last_modification_time = result.GetLastModified().Millis() / 1000;
const auto & result = outcome.GetResult();
ObjectInfo object_info;
object_info.size = static_cast<size_t>(result.GetContentLength());
object_info.last_modification_time = result.GetLastModified().Millis() / 1000;
if (with_metadata)
object_info.metadata = result.GetMetadata();
if (with_metadata)
object_info.metadata = result.GetMetadata();
return {object_info, {}};
}
else
{
ObjectInfo object_info;
{
auto outcome = getObjectAttributes(client, bucket, key, version_id, for_disk_s3);
if (!outcome.IsSuccess())
return {std::nullopt, outcome.GetError()};
const auto & result = outcome.GetResult();
object_info.size = static_cast<size_t>(result.GetObjectSize());
object_info.last_modification_time = result.GetLastModified().Millis() / 1000;
}
if (with_metadata)
{
auto outcome = getObjectDummy(client, bucket, key, version_id, for_disk_s3);
if (!outcome.IsSuccess())
return {std::nullopt, outcome.GetError()};
const auto & result = outcome.GetResult();
object_info.metadata = result.GetMetadata();
}
return {object_info, {}};
}
return {object_info, {}};
}
}
@ -143,7 +70,7 @@ bool isNotFoundError(Aws::S3::S3Errors error)
}
ObjectInfo getObjectInfo(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id,
@ -167,7 +94,7 @@ ObjectInfo getObjectInfo(
}
size_t getObjectSize(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id,
@ -179,7 +106,7 @@ size_t getObjectSize(
}
bool objectExists(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id,
@ -199,7 +126,7 @@ bool objectExists(
}
void checkObjectExists(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id,

View File

@ -5,7 +5,7 @@
#if USE_AWS_S3
#include <Storages/StorageS3Settings.h>
#include <base/types.h>
#include <aws/s3/S3Client.h>
#include <IO/S3/Client.h>
namespace DB::S3
@ -20,7 +20,7 @@ struct ObjectInfo
};
ObjectInfo getObjectInfo(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id = {},
@ -30,7 +30,7 @@ ObjectInfo getObjectInfo(
bool throw_on_error = true);
size_t getObjectSize(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id = {},
@ -39,7 +39,7 @@ size_t getObjectSize(
bool throw_on_error = true);
bool objectExists(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id = {},
@ -48,7 +48,7 @@ bool objectExists(
/// Throws an exception if a specified object doesn't exist. `description` is used as a part of the error message.
void checkObjectExists(
const Aws::S3::S3Client & client,
const S3::Client & client,
const String & bucket,
const String & key,
const String & version_id = {},

View File

@ -19,13 +19,13 @@
#include <aws/core/client/CoreErrors.h>
#include <aws/core/client/RetryStrategy.h>
#include <aws/core/http/URI.h>
#include <aws/s3/S3Client.h>
#include <Common/RemoteHostFilter.h>
#include <IO/ReadBufferFromS3.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadSettings.h>
#include <IO/S3Common.h>
#include <IO/S3/Client.h>
#include <IO/HTTPHeaderEntries.h>
#include <Storages/StorageS3Settings.h>
@ -102,7 +102,7 @@ TEST(IOTestAwsS3Client, AppendExtraSSECHeaders)
bool use_environment_credentials = false;
bool use_insecure_imds_request = false;
std::shared_ptr<Aws::S3::S3Client> client = DB::S3::ClientFactory::instance().create(
std::shared_ptr<DB::S3::Client> client = DB::S3::ClientFactory::instance().create(
client_configuration,
uri.is_virtual_hosted_style,
access_key_id,

View File

@ -16,7 +16,6 @@
# include <aws/core/auth/AWSCredentialsProvider.h>
# include <aws/core/auth/AWSCredentialsProviderChain.h>
# include <aws/core/auth/STSCredentialsProvider.h>
# include <aws/core/client/DefaultRetryStrategy.h>
# include <aws/core/client/SpecifiedRetryableErrorsRetryStrategy.h>
# include <aws/core/platform/Environment.h>
# include <aws/core/platform/OSVersionInfo.h>
@ -26,16 +25,12 @@
# include <aws/core/utils/HashingUtils.h>
# include <aws/core/utils/UUID.h>
# include <aws/core/http/HttpClientFactory.h>
# include <aws/s3/S3Client.h>
# include <aws/s3/model/GetObjectAttributesRequest.h>
# include <aws/s3/model/GetObjectRequest.h>
# include <aws/s3/model/HeadObjectRequest.h>
# include <IO/S3/PocoHTTPClientFactory.h>
# include <IO/S3/PocoHTTPClient.h>
# include <Poco/URI.h>
# include <re2/re2.h>
# include <boost/algorithm/string/case_conv.hpp>
# include <IO/S3/Client.h>
# include <IO/S3/URI.h>
# include <IO/S3/Requests.h>
# include <Common/logger_useful.h>
# include <fstream>
@ -712,7 +707,6 @@ namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int S3_ERROR;
}
@ -738,7 +732,7 @@ namespace S3
return ret;
}
std::unique_ptr<Aws::S3::S3Client> ClientFactory::create( // NOLINT
std::unique_ptr<S3::Client> ClientFactory::create( // NOLINT
const PocoHTTPClientConfiguration & cfg_,
bool is_virtual_hosted_style,
const String & access_key_id,
@ -753,7 +747,7 @@ namespace S3
if (!server_side_encryption_customer_key_base64.empty())
{
/// See S3Client::GeneratePresignedUrlWithSSEC().
/// See Client::GeneratePresignedUrlWithSSEC().
headers.push_back({Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::AES256)});
@ -776,7 +770,9 @@ namespace S3
use_environment_credentials,
use_insecure_imds_request);
return std::make_unique<Aws::S3::S3Client>(
client_configuration.retryStrategy = std::make_shared<Client::RetryStrategy>(std::move(client_configuration.retryStrategy));
return Client::create(
client_configuration.s3_max_redirects,
std::move(credentials_provider),
std::move(client_configuration), // Client configuration.
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
@ -802,100 +798,6 @@ namespace S3
get_request_throttler,
put_request_throttler);
}
URI::URI(const std::string & uri_)
{
/// Case when bucket name represented in domain name of S3 URL.
/// E.g. (https://bucket-name.s3.Region.amazonaws.com/key)
/// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access
static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3|cos|obs|oss)([.\-][a-z0-9\-.:]+))");
/// Case when bucket name and key represented in path of S3 URL.
/// E.g. (https://s3.Region.amazonaws.com/bucket-name/key)
/// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#path-style-access
static const RE2 path_style_pattern("^/([^/]*)/(.*)");
static constexpr auto S3 = "S3";
static constexpr auto COSN = "COSN";
static constexpr auto COS = "COS";
static constexpr auto OBS = "OBS";
static constexpr auto OSS = "OSS";
uri = Poco::URI(uri_);
storage_name = S3;
if (uri.getHost().empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host is empty in S3 URI.");
/// Extract object version ID from query string.
bool has_version_id = false;
for (const auto & [query_key, query_value] : uri.getQueryParameters())
if (query_key == "versionId")
{
version_id = query_value;
has_version_id = true;
}
/// Poco::URI will ignore '?' when parsing the path, but if there is a versionId in the HTTP parameters,
/// '?' cannot be used as a wildcard, otherwise it would be ambiguous.
/// If there is no "versionId" in the HTTP parameters, '?' can be used as a wildcard.
/// It is necessary to encode '?' so that it is not dropped while parsing the path.
if (!has_version_id && uri_.find('?') != String::npos)
{
String uri_with_question_mark_encode;
Poco::URI::encode(uri_, "?", uri_with_question_mark_encode);
uri = Poco::URI(uri_with_question_mark_encode);
}
String name;
String endpoint_authority_from_uri;
if (re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket, &name, &endpoint_authority_from_uri))
{
is_virtual_hosted_style = true;
endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri;
validateBucket(bucket, uri);
if (!uri.getPath().empty())
{
/// Remove leading '/' from path to extract key.
key = uri.getPath().substr(1);
}
boost::to_upper(name);
if (name != S3 && name != COS && name != OBS && name != OSS)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Object storage system name is unrecognized in virtual hosted style S3 URI: {}",
quoteString(name));
if (name == S3)
storage_name = name;
else if (name == OBS)
storage_name = OBS;
else if (name == OSS)
storage_name = OSS;
else
storage_name = COSN;
}
else if (re2::RE2::PartialMatch(uri.getPath(), path_style_pattern, &bucket, &key))
{
is_virtual_hosted_style = false;
endpoint = uri.getScheme() + "://" + uri.getAuthority();
validateBucket(bucket, uri);
}
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket or key name are invalid in S3 URI.");
}
void URI::validateBucket(const String & bucket, const Poco::URI & uri)
{
/// S3 specification requires at least 3 and at most 63 characters in bucket name.
/// https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
if (bucket.length() < 3 || bucket.length() > 63)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bucket name length is out of bounds in virtual hosted style S3 URI: {}{}",
quoteString(bucket), !uri.empty() ? " (" + uri.toString() + ")" : "");
}
}
}

View File

@ -14,12 +14,13 @@
#include <Common/Exception.h>
#include <Common/Throttler_fwd.h>
#include <Poco/URI.h>
#include <IO/S3/Client.h>
#include <IO/S3/URI.h>
#include <aws/core/Aws.h>
#include <aws/s3/S3Errors.h>
namespace Aws::S3 { class S3Client; }
namespace Aws::S3 { class Client; }
namespace DB
{
@ -60,7 +61,6 @@ private:
};
}
namespace DB::S3
{
@ -71,7 +71,7 @@ public:
static ClientFactory & instance();
std::unique_ptr<Aws::S3::S3Client> create(
std::unique_ptr<S3::Client> create(
const PocoHTTPClientConfiguration & cfg,
bool is_virtual_hosted_style,
const String & access_key_id,
@ -97,30 +97,6 @@ private:
std::atomic<bool> s3_requests_logging_enabled;
};
/**
* Represents S3 URI.
*
* The following patterns are allowed:
* s3://bucket/key
* http(s)://endpoint/bucket/key
*/
struct URI
{
Poco::URI uri;
// Custom endpoint if URI scheme is not S3.
String endpoint;
String bucket;
String key;
String version_id;
String storage_name;
bool is_virtual_hosted_style;
explicit URI(const std::string & uri_);
static void validateBucket(const String & bucket, const Poco::URI & uri);
};
}
#endif

View File

@ -9,15 +9,11 @@
#include <IO/WriteBufferFromS3.h>
#include <IO/WriteHelpers.h>
#include <IO/S3Common.h>
#include <IO/S3/Requests.h>
#include <IO/S3/getObjectInfo.h>
#include <Interpreters/Context.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/StorageClass.h>
#include <utility>
@ -28,13 +24,11 @@ namespace ProfileEvents
extern const Event WriteBufferFromS3Bytes;
extern const Event S3WriteBytes;
extern const Event S3HeadObject;
extern const Event S3CreateMultipartUpload;
extern const Event S3CompleteMultipartUpload;
extern const Event S3UploadPart;
extern const Event S3PutObject;
extern const Event DiskS3HeadObject;
extern const Event DiskS3CreateMultipartUpload;
extern const Event DiskS3CompleteMultipartUpload;
extern const Event DiskS3UploadPart;
@ -59,7 +53,7 @@ namespace ErrorCodes
struct WriteBufferFromS3::UploadPartTask
{
Aws::S3::Model::UploadPartRequest req;
S3::UploadPartRequest req;
bool is_finished = false;
std::string tag;
std::exception_ptr exception;
@ -67,13 +61,13 @@ struct WriteBufferFromS3::UploadPartTask
struct WriteBufferFromS3::PutObjectTask
{
Aws::S3::Model::PutObjectRequest req;
S3::PutObjectRequest req;
bool is_finished = false;
std::exception_ptr exception;
};
WriteBufferFromS3::WriteBufferFromS3(
std::shared_ptr<const Aws::S3::S3Client> client_ptr_,
std::shared_ptr<const S3::Client> client_ptr_,
const String & bucket_,
const String & key_,
const S3Settings::RequestSettings & request_settings_,
@ -191,7 +185,7 @@ void WriteBufferFromS3::finalizeImpl()
void WriteBufferFromS3::createMultipartUpload()
{
Aws::S3::Model::CreateMultipartUploadRequest req;
DB::S3::CreateMultipartUploadRequest req;
req.SetBucket(bucket);
req.SetKey(key);
@ -298,7 +292,7 @@ void WriteBufferFromS3::writePart()
}
}
void WriteBufferFromS3::fillUploadRequest(Aws::S3::Model::UploadPartRequest & req)
void WriteBufferFromS3::fillUploadRequest(S3::UploadPartRequest & req)
{
/// Increase part number.
++part_number;
@ -369,7 +363,7 @@ void WriteBufferFromS3::completeMultipartUpload()
if (tags.empty())
throw Exception(ErrorCodes::S3_ERROR, "Failed to complete multipart upload. No parts have been uploaded");
Aws::S3::Model::CompleteMultipartUploadRequest req;
S3::CompleteMultipartUploadRequest req;
req.SetBucket(bucket);
req.SetKey(key);
req.SetUploadId(multipart_upload_id);
@ -474,7 +468,7 @@ void WriteBufferFromS3::makeSinglepartUpload()
}
}
void WriteBufferFromS3::fillPutRequest(Aws::S3::Model::PutObjectRequest & req)
void WriteBufferFromS3::fillPutRequest(S3::PutObjectRequest & req)
{
req.SetBucket(bucket);
req.SetKey(key);

View File

@ -14,6 +14,7 @@
#include <IO/BufferWithOwnMemory.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteSettings.h>
#include <IO/S3/Requests.h>
#include <Storages/StorageS3Settings.h>
#include <Interpreters/threadPoolCallbackRunner.h>
@ -22,13 +23,7 @@
namespace Aws::S3
{
class S3Client;
}
namespace Aws::S3::Model
{
class UploadPartRequest;
class PutObjectRequest;
class Client;
}
namespace DB
@ -47,7 +42,7 @@ class WriteBufferFromS3 final : public BufferWithOwnMemory<WriteBuffer>
{
public:
WriteBufferFromS3(
std::shared_ptr<const Aws::S3::S3Client> client_ptr_,
std::shared_ptr<const S3::Client> client_ptr_,
const String & bucket_,
const String & key_,
const S3Settings::RequestSettings & request_settings_,
@ -75,11 +70,11 @@ private:
void finalizeImpl() override;
struct UploadPartTask;
void fillUploadRequest(Aws::S3::Model::UploadPartRequest & req);
void fillUploadRequest(S3::UploadPartRequest & req);
void processUploadRequest(UploadPartTask & task);
struct PutObjectTask;
void fillPutRequest(Aws::S3::Model::PutObjectRequest & req);
void fillPutRequest(S3::PutObjectRequest & req);
void processPutRequest(const PutObjectTask & task);
void waitForReadyBackGroundTasks();
@ -90,7 +85,7 @@ private:
const String key;
const S3Settings::RequestSettings request_settings;
const S3Settings::RequestSettings::PartUploadSettings & upload_settings;
const std::shared_ptr<const Aws::S3::S3Client> client_ptr;
const std::shared_ptr<const S3::Client> client_ptr;
const std::optional<std::map<String, String>> object_metadata;
size_t upload_part_size = 0;

View File

@ -418,14 +418,4 @@ void InterpreterGrantQuery::updateRoleFromQuery(Role & role, const ASTGrantQuery
updateFromQuery(role, query);
}
void InterpreterGrantQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr) const
{
auto & query = query_ptr->as<ASTGrantQuery &>();
if (query.is_revoke)
elem.query_kind = "Revoke";
else
elem.query_kind = "Grant";
}
}

View File

@ -21,7 +21,6 @@ public:
static void updateUserFromQuery(User & user, const ASTGrantQuery & query);
static void updateRoleFromQuery(Role & role, const ASTGrantQuery & query);
void extendQueryLogElemImpl(QueryLogElement &, const ASTPtr &, ContextPtr) const override;
private:
ASTPtr query_ptr;

View File

@ -7,7 +7,10 @@
#include <Common/ProfileEvents.h>
#include <Common/checkStackSize.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <IO/ConnectionTimeoutsContext.h>
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/RequiredSourceColumnsVisitor.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>
#include <DataTypes/ObjectUtils.h>
#include <Client/IConnections.h>
@ -36,6 +39,53 @@ namespace ErrorCodes
namespace ClusterProxy
{
/// A SELECT query keeps its database, table and table function names as AST pointers.
/// This creates a copy of the query and changes the database, table and table function names in it.
ASTPtr rewriteSelectQuery(
ContextPtr context,
const ASTPtr & query,
const std::string & remote_database,
const std::string & remote_table,
ASTPtr table_function_ptr)
{
auto modified_query_ast = query->clone();
ASTSelectQuery & select_query = modified_query_ast->as<ASTSelectQuery &>();
// Get rid of the settings clause so we don't send them to the remote servers. Thus newly added,
// non-important settings won't break any remote parser. It's also more reasonable since the query
// settings are written into the query context and will be sent by the query pipeline.
select_query.setExpression(ASTSelectQuery::Expression::SETTINGS, {});
if (table_function_ptr)
select_query.addTableFunction(table_function_ptr);
else
select_query.replaceDatabaseAndTable(remote_database, remote_table);
/// Restore long column names (because our short names are ambiguous).
/// TODO: aliased table functions & CREATE TABLE AS table function cases
if (!table_function_ptr)
{
RestoreQualifiedNamesVisitor::Data data;
data.distributed_table = DatabaseAndTableWithAlias(*getTableExpression(query->as<ASTSelectQuery &>(), 0));
data.remote_table.database = remote_database;
data.remote_table.table = remote_table;
RestoreQualifiedNamesVisitor(data).visit(modified_query_ast);
}
/// To make local JOIN work, the default database should be added to table names.
/// But only for JOIN section, since the following should work using default_database:
/// - SELECT * FROM d WHERE value IN (SELECT l.value FROM l) ORDER BY value
/// (see 01487_distributed_in_not_default_db)
AddDefaultDatabaseVisitor visitor(context, context->getCurrentDatabase(),
/* only_replace_current_database_function_= */false,
/* only_replace_in_join_= */true);
visitor.visit(modified_query_ast);
return modified_query_ast;
}
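A short usage sketch of the function above; the query text and identifiers are assumptions for illustration, and `context` and `query` would come from the caller:

void exampleRewrite(ContextPtr context, const ASTPtr & query)
{
    /// Suppose `query` is the parsed AST of
    ///   SELECT x FROM default.dist SETTINGS max_threads = 8
    ASTPtr query_for_shard = rewriteSelectQuery(context, query, "shard_db", "shard_table");
    /// `query_for_shard` now refers to shard_db.shard_table and no longer carries the SETTINGS
    /// clause, while the original `query` is left untouched.
}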
SelectStreamFactory::SelectStreamFactory(
const Block & header_,
const ColumnsDescriptionByShardNum & objects_by_shard_,
@ -171,67 +221,5 @@ void SelectStreamFactory::createForShard(
}
void SelectStreamFactory::createForShardWithParallelReplicas(
const Cluster::ShardInfo & shard_info,
const ASTPtr & query_ast,
const StorageID & main_table,
ContextPtr context,
UInt32 shard_count,
std::vector<QueryPlanPtr> & local_plans,
Shards & remote_shards)
{
if (auto it = objects_by_shard.find(shard_info.shard_num); it != objects_by_shard.end())
replaceMissedSubcolumnsByConstants(storage_snapshot->object_columns, it->second, query_ast);
const auto & settings = context->getSettingsRef();
auto is_local_replica_obsolete = [&]()
{
auto resolved_id = context->resolveStorageID(main_table);
auto main_table_storage = DatabaseCatalog::instance().tryGetTable(resolved_id, context);
const auto * replicated_storage = dynamic_cast<const StorageReplicatedMergeTree *>(main_table_storage.get());
if (!replicated_storage)
return false;
UInt64 max_allowed_delay = settings.max_replica_delay_for_distributed_queries;
if (!max_allowed_delay)
return false;
UInt64 local_delay = replicated_storage->getAbsoluteDelay();
return local_delay >= max_allowed_delay;
};
size_t next_replica_number = 0;
size_t all_replicas_count = shard_info.getRemoteNodeCount();
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>();
if (settings.prefer_localhost_replica && shard_info.isLocal())
{
/// We don't need more than one local replica in parallel reading
if (!is_local_replica_obsolete())
{
++all_replicas_count;
local_plans.emplace_back(createLocalPlan(
query_ast, header, context, processed_stage, shard_info.shard_num, shard_count, next_replica_number, all_replicas_count, coordinator));
++next_replica_number;
}
}
if (shard_info.hasRemoteConnections())
remote_shards.emplace_back(Shard{
.query = query_ast,
.header = header,
.shard_info = shard_info,
.lazy = false,
.local_delay = 0,
.coordinator = coordinator,
});
}
}
}

View File

@ -29,6 +29,14 @@ struct StorageID;
namespace ClusterProxy
{
/// A SELECT query keeps its database, table and table function names as AST pointers.
/// This creates a copy of the query and changes the database, table and table function names in it.
ASTPtr rewriteSelectQuery(
ContextPtr context,
const ASTPtr & query,
const std::string & remote_database,
const std::string & remote_table,
ASTPtr table_function_ptr = nullptr);
using ColumnsDescriptionByShardNum = std::unordered_map<UInt32, ColumnsDescription>;
@ -80,16 +88,6 @@ public:
std::unique_ptr<QueryPlan> remote_plan;
};
void createForShardWithParallelReplicas(
const Cluster::ShardInfo & shard_info,
const ASTPtr & query_ast,
const StorageID & main_table,
ContextPtr context,
UInt32 shard_count,
std::vector<QueryPlanPtr> & local_plans,
Shards & remote_shards);
private:
const Block header;
const ColumnsDescriptionByShardNum objects_by_shard;
const StorageSnapshotPtr storage_snapshot;

View File

@ -1,6 +1,8 @@
#include <Core/QueryProcessingStage.h>
#include <Core/Settings.h>
#include <Core/UUID.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/ObjectUtils.h>
#include <Interpreters/Cluster.h>
#include <Interpreters/ClusterProxy/SelectStreamFactory.h>
#include <Interpreters/ClusterProxy/executeQuery.h>
@ -13,8 +15,11 @@
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/ReadFromRemote.h>
#include <Processors/QueryPlan/UnionStep.h>
#include <Processors/QueryPlan/DistributedCreateLocalPlan.h>
#include <Processors/ResizeProcessor.h>
#include <QueryPipeline/Pipe.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/StorageReplicatedMergeTree.h>
namespace DB
{
@ -23,6 +28,7 @@ namespace ErrorCodes
{
extern const int TOO_LARGE_DISTRIBUTED_DEPTH;
extern const int LOGICAL_ERROR;
extern const int SUPPORT_IS_DISABLED;
}
namespace ClusterProxy
@ -117,6 +123,31 @@ ContextMutablePtr updateSettingsForCluster(const Cluster & cluster, ContextPtr c
return new_context;
}
static ThrottlerPtr getThrottler(const ContextPtr & context)
{
const Settings & settings = context->getSettingsRef();
ThrottlerPtr user_level_throttler;
if (auto process_list_element = context->getProcessListElement())
user_level_throttler = process_list_element->getUserNetworkThrottler();
/// Network bandwidth limit, if needed.
ThrottlerPtr throttler;
if (settings.max_network_bandwidth || settings.max_network_bytes)
{
throttler = std::make_shared<Throttler>(
settings.max_network_bandwidth,
settings.max_network_bytes,
"Limit for bytes to send or receive over network exceeded.",
user_level_throttler);
}
else
throttler = user_level_throttler;
return throttler;
}
void executeQuery(
QueryPlan & query_plan,
const Block & header,
@ -138,26 +169,8 @@ void executeQuery(
SelectStreamFactory::Shards remote_shards;
auto new_context = updateSettingsForCluster(*query_info.getCluster(), context, settings, main_table, &query_info, log);
new_context->getClientInfo().distributed_depth += 1;
ThrottlerPtr user_level_throttler;
if (auto process_list_element = context->getProcessListElement())
user_level_throttler = process_list_element->getUserNetworkThrottler();
/// Network bandwidth limit, if needed.
ThrottlerPtr throttler;
if (settings.max_network_bandwidth || settings.max_network_bytes)
{
throttler = std::make_shared<Throttler>(
settings.max_network_bandwidth,
settings.max_network_bytes,
"Limit for bytes to send or receive over network exceeded.",
user_level_throttler);
}
else
throttler = user_level_throttler;
size_t shards = query_info.getCluster()->getShardCount();
for (const auto & shard_info : query_info.getCluster()->getShardsInfo())
{
@ -199,7 +212,7 @@ void executeQuery(
main_table,
table_func_ptr,
new_context,
throttler,
getThrottler(context),
std::move(scalars),
std::move(external_tables),
log,
@ -236,103 +249,76 @@ void executeQueryWithParallelReplicas(
const StorageID & main_table,
const ASTPtr & table_func_ptr,
SelectStreamFactory & stream_factory,
const ASTPtr & query_ast,
ContextPtr context,
const SelectQueryInfo & query_info,
const ExpressionActionsPtr & sharding_key_expr,
const std::string & sharding_key_column_name,
const ClusterPtr & not_optimized_cluster,
QueryProcessingStage::Enum processed_stage)
const ASTPtr & query_ast, ContextPtr context, const SelectQueryInfo & query_info,
const ClusterPtr & not_optimized_cluster)
{
const Settings & settings = context->getSettingsRef();
if (not_optimized_cluster->getShardsInfo().size() != 1)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Cluster for parallel replicas should consist only from one shard");
ThrottlerPtr user_level_throttler;
if (auto process_list_element = context->getProcessListElement())
user_level_throttler = process_list_element->getUserNetworkThrottler();
auto shard_info = not_optimized_cluster->getShardsInfo().front();
/// Network bandwidth limit, if needed.
ThrottlerPtr throttler;
if (settings.max_network_bandwidth || settings.max_network_bytes)
{
throttler = std::make_shared<Throttler>(
settings.max_network_bandwidth,
settings.max_network_bytes,
"Limit for bytes to send or receive over network exceeded.",
user_level_throttler);
}
else
throttler = user_level_throttler;
std::vector<QueryPlanPtr> plans;
SelectStreamFactory::Shards remote_shards;
size_t shards = query_info.getCluster()->getShardCount();
for (const auto & shard_info : query_info.getCluster()->getShardsInfo())
{
ASTPtr query_ast_for_shard;
if (query_info.optimized_cluster && settings.optimize_skip_unused_shards_rewrite_in && shards > 1)
{
query_ast_for_shard = query_ast->clone();
OptimizeShardingKeyRewriteInVisitor::Data visitor_data{
sharding_key_expr,
sharding_key_expr->getSampleBlock().getByPosition(0).type,
sharding_key_column_name,
shard_info,
not_optimized_cluster->getSlotToShard(),
};
OptimizeShardingKeyRewriteInVisitor visitor(visitor_data);
visitor.visit(query_ast_for_shard);
}
else
query_ast_for_shard = query_ast;
stream_factory.createForShardWithParallelReplicas(
shard_info, query_ast_for_shard, main_table, context, static_cast<UInt32>(shards), plans, remote_shards);
}
Scalars scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};
scalars.emplace(
"_shard_count", Block{{DataTypeUInt32().createColumnConst(1, shards), std::make_shared<DataTypeUInt32>(), "_shard_count"}});
auto external_tables = context->getExternalTables();
if (!remote_shards.empty())
{
auto new_context = Context::createCopy(context);
for (const auto & shard : remote_shards)
{
auto read_from_remote = std::make_unique<ReadFromParallelRemoteReplicasStep>(
shard.coordinator,
shard,
shard.header,
processed_stage,
main_table,
table_func_ptr,
new_context,
throttler,
scalars,
external_tables,
&Poco::Logger::get("ReadFromParallelRemoteReplicasStep"),
query_info.storage_limits);
auto remote_plan = std::make_unique<QueryPlan>();
remote_plan->addStep(std::move(read_from_remote));
remote_plan->addInterpreterContext(new_context);
plans.emplace_back(std::move(remote_plan));
}
}
if (plans.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from Distributed. This is a bug");
if (plans.size() == 1)
const auto & settings = context->getSettingsRef();
auto all_replicas_count = std::min(static_cast<size_t>(settings.max_parallel_replicas), shard_info.all_addresses.size());
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(all_replicas_count);
auto remote_plan = std::make_unique<QueryPlan>();
auto plans = std::vector<QueryPlanPtr>();
/// This is a little bit weird, but we construct an "empty" coordinator without
/// any specified reading/coordination method (like Default, InOrder, InReverseOrder),
/// because we will only figure that out later, during QueryPlan optimization.
/// So we put a reference to the coordinator into some common place like QueryInfo,
/// so that we can later tell it which reading method we chose.
query_info.coordinator = coordinator;
UUID parallel_group_id = UUIDHelpers::generateV4();
plans.emplace_back(createLocalPlan(
query_ast,
stream_factory.header,
context,
stream_factory.processed_stage,
shard_info.shard_num,
/*shard_count*/1,
0,
all_replicas_count,
coordinator,
parallel_group_id));
if (!shard_info.hasRemoteConnections())
{
if (!plans.front())
throw Exception(ErrorCodes::LOGICAL_ERROR, "An empty plan was generated to read from the local shard and there are no remote connections. This is a bug");
query_plan = std::move(*plans.front());
return;
}
auto new_context = Context::createCopy(context);
auto scalars = new_context->hasQueryContext() ? new_context->getQueryContext()->getScalars() : Scalars{};
auto external_tables = new_context->getExternalTables();
auto read_from_remote = std::make_unique<ReadFromParallelRemoteReplicasStep>(
query_ast,
std::move(shard_info),
coordinator,
stream_factory.header,
stream_factory.processed_stage,
main_table,
table_func_ptr,
new_context,
getThrottler(new_context),
std::move(scalars),
std::move(external_tables),
&Poco::Logger::get("ReadFromParallelRemoteReplicasStep"),
query_info.storage_limits,
parallel_group_id);
remote_plan->addStep(std::move(read_from_remote));
remote_plan->addInterpreterContext(context);
plans.emplace_back(std::move(remote_plan));
if (std::all_of(plans.begin(), plans.end(), [](const QueryPlanPtr & plan) { return !plan; }))
throw Exception(ErrorCodes::LOGICAL_ERROR, "No plans were generated for reading from shard. This is a bug");
DataStreams input_streams;
input_streams.reserve(plans.size());
for (const auto & plan : plans)

View File

@ -61,10 +61,7 @@ void executeQueryWithParallelReplicas(
const ASTPtr & query_ast,
ContextPtr context,
const SelectQueryInfo & query_info,
const ExpressionActionsPtr & sharding_key_expr,
const std::string & sharding_key_column_name,
const ClusterPtr & not_optimized_cluster,
QueryProcessingStage::Enum processed_stage);
const ClusterPtr & not_optimized_cluster);
}
}

View File

@ -3621,6 +3621,32 @@ void Context::setMergeTreeReadTaskCallback(MergeTreeReadTaskCallback && callback
merge_tree_read_task_callback = callback;
}
MergeTreeAllRangesCallback Context::getMergeTreeAllRangesCallback() const
{
if (!merge_tree_all_ranges_callback.has_value())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Next task callback is not set for query with id: {}", getInitialQueryId());
return merge_tree_all_ranges_callback.value();
}
void Context::setMergeTreeAllRangesCallback(MergeTreeAllRangesCallback && callback)
{
merge_tree_all_ranges_callback = callback;
}
void Context::setParallelReplicasGroupUUID(UUID uuid)
{
parallel_replicas_group_uuid = uuid;
}
UUID Context::getParallelReplicasGroupUUID() const
{
return parallel_replicas_group_uuid;
}
PartUUIDsPtr Context::getIgnoredPartUUIDs() const
{
auto lock = getLock();
@ -3886,4 +3912,22 @@ WriteSettings Context::getWriteSettings() const
return res;
}
bool Context::canUseParallelReplicasOnInitiator() const
{
const auto & settings = getSettingsRef();
return settings.allow_experimental_parallel_reading_from_replicas
&& settings.max_parallel_replicas > 1
&& !settings.use_hedged_requests
&& !getClientInfo().collaborate_with_initiator;
}
bool Context::canUseParallelReplicasOnFollower() const
{
const auto & settings = getSettingsRef();
return settings.allow_experimental_parallel_reading_from_replicas
&& settings.max_parallel_replicas > 1
&& !settings.use_hedged_requests
&& getClientInfo().collaborate_with_initiator;
}
}
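For illustration, this is how these two helpers are typically meant to be used when choosing an execution path; the surrounding function is hypothetical, and only the two Context methods come from this diff:

void chooseParallelReplicasMode(const ContextPtr & context)
{
    if (context->canUseParallelReplicasOnInitiator())
    {
        /// We are the initiator: build a local plan plus a ReadFromParallelRemoteReplicasStep,
        /// as ClusterProxy::executeQueryWithParallelReplicas() does above.
    }
    else if (context->canUseParallelReplicasOnFollower())
    {
        /// We are a follower replica: read only the ranges assigned to us by the initiator's
        /// coordinator via the announcement/read-task callbacks registered on the Context.
    }
}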

View File

@ -1,5 +1,11 @@
#pragma once
#include <base/types.h>
#include <Common/isLocalAddress.h>
#include <Common/MultiVersion.h>
#include <Common/OpenTelemetryTraceContext.h>
#include <Common/RemoteHostFilter.h>
#include <Common/ThreadPool.h>
#include <Core/Block.h>
#include <Core/NamesAndTypes.h>
#include <Core/Settings.h>
@ -8,32 +14,24 @@
#include <Interpreters/Context_fwd.h>
#include <Interpreters/DatabaseCatalog.h>
#include <Interpreters/MergeTreeTransactionHolder.h>
#include <Parsers/IAST_fwd.h>
#include <Parsers/ASTSelectQuery.h>
#include <Storages/IStorage_fwd.h>
#include <Common/MultiVersion.h>
#include <Common/OpenTelemetryTraceContext.h>
#include <Common/RemoteHostFilter.h>
#include <Common/ThreadPool.h>
#include <Common/isLocalAddress.h>
#include <base/types.h>
#include <Storages/MergeTree/ParallelReplicasReadingCoordinator.h>
#include <Storages/ColumnsDescription.h>
#include <IO/IResourceManager.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/IAST_fwd.h>
#include <Processors/ResizeProcessor.h>
#include <Processors/Transforms/ReadFromMergeTreeDependencyTransform.h>
#include <Server/HTTP/HTTPContext.h>
#include <Storages/ColumnsDescription.h>
#include <Storages/IStorage_fwd.h>
#include "config.h"
#include <boost/container/flat_set.hpp>
#include <exception>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <thread>
#include <exception>
namespace Poco::Net { class IPAddress; }
@ -98,7 +96,11 @@ class TransactionsInfoLog;
class ProcessorsProfileLog;
class FilesystemCacheLog;
class AsynchronousInsertLog;
class IAsynchronousReader;
struct MergeTreeSettings;
struct InitialAllRangesAnnouncement;
struct ParallelReadRequest;
struct ParallelReadResponse;
class StorageS3Settings;
class IDatabase;
class DDLWorker;
@ -172,11 +174,15 @@ using InputBlocksReader = std::function<Block(ContextPtr)>;
/// Used in distributed task processing
using ReadTaskCallback = std::function<String()>;
using MergeTreeReadTaskCallback = std::function<std::optional<PartitionReadResponse>(PartitionReadRequest)>;
using MergeTreeAllRangesCallback = std::function<void(InitialAllRangesAnnouncement)>;
using MergeTreeReadTaskCallback = std::function<std::optional<ParallelReadResponse>(ParallelReadRequest)>;
class TemporaryDataOnDiskScope;
using TemporaryDataOnDiskScopePtr = std::shared_ptr<TemporaryDataOnDiskScope>;
class ParallelReplicasReadingCoordinator;
using ParallelReplicasReadingCoordinatorPtr = std::shared_ptr<ParallelReplicasReadingCoordinator>;
#if USE_ROCKSDB
class MergeTreeMetadataCache;
using MergeTreeMetadataCachePtr = std::shared_ptr<MergeTreeMetadataCache>;
@ -262,6 +268,8 @@ private:
/// Used in parallel reading from replicas. A replica tells about its intentions to read
/// some ranges from some part and initiator will tell the replica about whether it is accepted or denied.
std::optional<MergeTreeReadTaskCallback> merge_tree_read_task_callback;
std::optional<MergeTreeAllRangesCallback> merge_tree_all_ranges_callback;
UUID parallel_replicas_group_uuid{UUIDHelpers::Nil};
/// Record entities accessed by current query, and store this information in system.query_log.
struct QueryAccessInfo
@ -380,6 +388,7 @@ private:
/// Temporary data for query execution accounting.
TemporaryDataOnDiskScopePtr temp_data_on_disk;
public:
/// Some counters for current query execution.
/// Most of them are workarounds and should be removed in the future.
@ -402,6 +411,8 @@ public:
KitchenSink kitchen_sink;
ParallelReplicasReadingCoordinatorPtr parallel_reading_coordinator;
private:
using SampleBlockCache = std::unordered_map<std::string, Block>;
mutable SampleBlockCache sample_block_cache;
@ -1045,6 +1056,12 @@ public:
MergeTreeReadTaskCallback getMergeTreeReadTaskCallback() const;
void setMergeTreeReadTaskCallback(MergeTreeReadTaskCallback && callback);
MergeTreeAllRangesCallback getMergeTreeAllRangesCallback() const;
void setMergeTreeAllRangesCallback(MergeTreeAllRangesCallback && callback);
UUID getParallelReplicasGroupUUID() const;
void setParallelReplicasGroupUUID(UUID uuid);
/// Background executors related methods
void initializeBackgroundExecutorsIfNeeded();
bool areBackgroundExecutorsInitialized();
@ -1071,6 +1088,10 @@ public:
/** Get settings for writing to filesystem. */
WriteSettings getWriteSettings() const;
/** There are multiple conditions that have to be met to be able to use parallel replicas */
bool canUseParallelReplicasOnInitiator() const;
bool canUseParallelReplicasOnFollower() const;
private:
std::unique_lock<std::recursive_mutex> getLock() const;

View File

@ -11,6 +11,7 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
}
void IInterpreter::extendQueryLogElem(
QueryLogElement & elem, const ASTPtr & ast, ContextPtr context, const String & query_database, const String & query_table) const
{

View File

@ -9,18 +9,12 @@
#include <Interpreters/ActionsDAG.h>
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/TreeRewriter.h>
#include <Processors/QueryPlan/IQueryPlanStep.h>
#include <Processors/QueryPlan/FilterStep.h>
namespace DB
{
void IInterpreterUnionOrSelectQuery::extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & /*ast*/, ContextPtr /*context_*/) const
{
elem.query_kind = "Select";
}
QueryPipelineBuilder IInterpreterUnionOrSelectQuery::buildQueryPipeline()
{
QueryPlan query_plan;

View File

@ -44,8 +44,6 @@ public:
size_t getMaxStreams() const { return max_streams; }
void extendQueryLogElemImpl(QueryLogElement & elem, const ASTPtr & ast, ContextPtr context) const override;
/// Returns whether the query uses the view source from the Context
/// The view source is a virtual storage that currently only materialized views use to replace the source table
/// with the incoming block only

Some files were not shown because too many files have changed in this diff.