diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 417284f14d5..44fe082b04d 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -9,6 +9,18 @@ on: # yamllint disable-line rule:truthy
branches:
- 'backport/**'
jobs:
+ PythonUnitTests:
+ runs-on: [self-hosted, style-checker]
+ steps:
+ - name: Clear repository
+ run: |
+ sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+ - name: Check out repository code
+ uses: actions/checkout@v2
+ - name: Python unit tests
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 -m unittest discover -s . -p '*_test.py'
DockerHubPushAarch64:
runs-on: [self-hosted, style-checker-aarch64]
steps:
@@ -143,8 +155,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -184,8 +196,8 @@ jobs:
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -229,8 +241,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -274,8 +286,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -319,8 +331,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index eab7ce36eb7..efaf1c64c05 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -219,8 +219,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -260,8 +260,8 @@ jobs:
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -305,8 +305,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -350,8 +350,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -395,8 +395,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -440,8 +440,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -485,8 +485,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -530,8 +530,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -575,8 +575,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -620,8 +620,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -668,8 +668,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -713,8 +713,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -758,8 +758,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -803,8 +803,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -848,8 +848,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -893,8 +893,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -938,8 +938,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 836421f34dd..bd54fd975c0 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -112,7 +112,7 @@ jobs:
run: |
curl --form token="${COVERITY_TOKEN}" \
--form email='security+coverity@clickhouse.com' \
- --form file="@$TEMP_PATH/$BUILD_NAME/clickhouse-scan.tgz" \
+ --form file="@$TEMP_PATH/$BUILD_NAME/coverity-scan.tgz" \
--form version="${GITHUB_REF#refs/heads/}-${GITHUB_SHA::6}" \
--form description="Nighly Scan: $(date +'%Y-%m-%dT%H:%M:%S')" \
https://scan.coverity.com/builds?project=ClickHouse%2FClickHouse
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 8942cca391e..8072f816cb8 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -272,8 +272,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -317,8 +317,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -362,8 +362,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -404,8 +404,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -446,8 +446,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -491,8 +491,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -536,8 +536,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -581,8 +581,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -626,8 +626,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -671,8 +671,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -719,8 +719,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -764,8 +764,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -809,8 +809,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -854,8 +854,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -899,8 +899,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -944,8 +944,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -989,8 +989,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 29e3d0c4358..ea2e1ed33fb 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -52,8 +52,8 @@ jobs:
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 docker_server.py --release-type auto
- python3 docker_server.py --release-type auto --no-ubuntu \
+ python3 docker_server.py --release-type auto --version "${{ github.ref }}"
+ python3 docker_server.py --release-type auto --version "${{ github.ref }}" --no-ubuntu \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index b2af465142b..91e1a224204 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -146,8 +146,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -187,8 +187,8 @@ jobs:
- name: Upload build URLs to artifacts
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ runner.temp }}/build_check/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -232,8 +232,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -277,8 +277,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -322,8 +322,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -367,8 +367,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
@@ -412,8 +412,8 @@ jobs:
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v2
with:
- name: ${{ env.BUILD_NAME }}
- path: ${{ env.TEMP_PATH }}/${{ env.BUILD_NAME }}.json
+ name: ${{ env.BUILD_URLS }}
+ path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d893ba773cc..dad9a25ab26 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -222,6 +222,12 @@ else ()
set(NO_WHOLE_ARCHIVE --no-whole-archive)
endif ()
+option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build. On by default except on MacOS." ON)
+if (OS_DARWIN)
+ # Disable the curl, azure, sentry build on MacOS
+ set (ENABLE_CURL_BUILD OFF)
+endif ()
+
# Ignored if `lld` is used
option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")
diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp
index 311349a2ba7..b27a904b31a 100644
--- a/base/daemon/BaseDaemon.cpp
+++ b/base/daemon/BaseDaemon.cpp
@@ -828,7 +828,6 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
/// Setup signal handlers.
/// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.
-
addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals);
addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 9cf307c473e..1f03c0fd341 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -119,9 +119,13 @@ add_contrib (fastops-cmake fastops)
add_contrib (libuv-cmake libuv)
add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv
add_contrib (cassandra-cmake cassandra) # requires: libuv
-add_contrib (curl-cmake curl)
-add_contrib (azure-cmake azure)
-add_contrib (sentry-native-cmake sentry-native) # requires: curl
+
+if (ENABLE_CURL_BUILD)
+ add_contrib (curl-cmake curl)
+ add_contrib (azure-cmake azure)
+ add_contrib (sentry-native-cmake sentry-native) # requires: curl
+endif()
+
add_contrib (fmtlib-cmake fmtlib)
add_contrib (krb5-cmake krb5)
add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5
diff --git a/contrib/curl b/contrib/curl
index 3b8bbbbd160..801bd5138ce 160000
--- a/contrib/curl
+++ b/contrib/curl
@@ -1 +1 @@
-Subproject commit 3b8bbbbd1609c638a3d3d0acb148a33dedb67be3
+Subproject commit 801bd5138ce31aa0d906fa4e2eabfc599d74e793
diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt
index 589f40384e3..b1e1a0ded8a 100644
--- a/contrib/curl-cmake/CMakeLists.txt
+++ b/contrib/curl-cmake/CMakeLists.txt
@@ -32,7 +32,6 @@ set (SRCS
"${LIBRARY_DIR}/lib/transfer.c"
"${LIBRARY_DIR}/lib/strcase.c"
"${LIBRARY_DIR}/lib/easy.c"
- "${LIBRARY_DIR}/lib/security.c"
"${LIBRARY_DIR}/lib/curl_fnmatch.c"
"${LIBRARY_DIR}/lib/fileinfo.c"
"${LIBRARY_DIR}/lib/wildcard.c"
@@ -115,6 +114,12 @@ set (SRCS
"${LIBRARY_DIR}/lib/curl_get_line.c"
"${LIBRARY_DIR}/lib/altsvc.c"
"${LIBRARY_DIR}/lib/socketpair.c"
+ "${LIBRARY_DIR}/lib/bufref.c"
+ "${LIBRARY_DIR}/lib/dynbuf.c"
+ "${LIBRARY_DIR}/lib/hsts.c"
+ "${LIBRARY_DIR}/lib/http_aws_sigv4.c"
+ "${LIBRARY_DIR}/lib/mqtt.c"
+ "${LIBRARY_DIR}/lib/rename.c"
"${LIBRARY_DIR}/lib/vauth/vauth.c"
"${LIBRARY_DIR}/lib/vauth/cleartext.c"
"${LIBRARY_DIR}/lib/vauth/cram.c"
@@ -131,8 +136,6 @@ set (SRCS
"${LIBRARY_DIR}/lib/vtls/gtls.c"
"${LIBRARY_DIR}/lib/vtls/vtls.c"
"${LIBRARY_DIR}/lib/vtls/nss.c"
- "${LIBRARY_DIR}/lib/vtls/polarssl.c"
- "${LIBRARY_DIR}/lib/vtls/polarssl_threadlock.c"
"${LIBRARY_DIR}/lib/vtls/wolfssl.c"
"${LIBRARY_DIR}/lib/vtls/schannel.c"
"${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
@@ -141,6 +144,7 @@ set (SRCS
"${LIBRARY_DIR}/lib/vtls/mbedtls.c"
"${LIBRARY_DIR}/lib/vtls/mesalink.c"
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
+ "${LIBRARY_DIR}/lib/vtls/keylog.c"
"${LIBRARY_DIR}/lib/vquic/ngtcp2.c"
"${LIBRARY_DIR}/lib/vquic/quiche.c"
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh
index 861e17848a4..6aa9d88f5b4 100755
--- a/docker/test/stateful/run.sh
+++ b/docker/test/stateful/run.sh
@@ -96,7 +96,7 @@ else
clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, 
LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
- clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
+ clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
fi
clickhouse-client --query "SHOW TABLES FROM test"
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index f8b73791388..63750b90b5a 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -139,7 +139,7 @@ pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhous
# directly
# - even though ci auto-compress some files (but not *.tsv) it does this only
# for files >64MB, we want this files to be compressed explicitly
-for table in query_log zookeeper_log trace_log
+for table in query_log zookeeper_log trace_log transactions_info_log
do
clickhouse-client -q "select * from system.$table format TSVWithNamesAndTypes" | pigz > /test_output/$table.tsv.gz &
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
diff --git a/docker/test/stress/stress b/docker/test/stress/stress
index 86f8edf5980..10c6088af75 100755
--- a/docker/test/stress/stress
+++ b/docker/test/stress/stress
@@ -10,7 +10,7 @@ import logging
import time
-def get_options(i):
+def get_options(i, backward_compatibility_check):
options = []
client_options = []
if 0 < i:
@@ -19,7 +19,7 @@ def get_options(i):
if i % 3 == 1:
options.append("--db-engine=Ordinary")
- if i % 3 == 2:
+ if i % 3 == 2 and not backward_compatibility_check:
options.append('''--db-engine="Replicated('/test/db/test_{}', 's1', 'r1')"'''.format(i))
client_options.append('allow_experimental_database_replicated=1')
@@ -57,7 +57,7 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option, global_t
pipes = []
for i in range(0, len(output_paths)):
f = open(output_paths[i], 'w')
- full_command = "{} {} {} {} {}".format(cmd, get_options(i), global_time_limit_option, skip_tests_option, backward_compatibility_check_option)
+ full_command = "{} {} {} {} {}".format(cmd, get_options(i, backward_compatibility_check), global_time_limit_option, skip_tests_option, backward_compatibility_check_option)
logging.info("Run func tests '%s'", full_command)
p = Popen(full_command, shell=True, stdout=f, stderr=f)
pipes.append(p)
diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md
index 20d6b20feb6..98eea85bbfa 100644
--- a/docs/en/introduction/adopters.md
+++ b/docs/en/introduction/adopters.md
@@ -158,6 +158,7 @@ toc_title: Adopters
| Staffcop | Information Security | Main Product | — | — | [Official website, Documentation](https://www.staffcop.ru/sce43) |
| Suning | E-Commerce | User behaviour analytics | — | — | [Blog article](https://www.sohu.com/a/434152235_411876) |
| Superwall | Monetization Tooling | Main product | — | — | [Word of mouth, Jan 2022](https://github.com/ClickHouse/ClickHouse/pull/33573) |
+| Swetrix | Analytics | Main Product | — | — | [Source code](https://github.com/swetrix/swetrix-api) |
| Teralytics | Mobility | Analytics | — | — | [Tech blog](https://www.teralytics.net/knowledge-hub/visualizing-mobility-data-the-scalability-challenge) |
| Tencent | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
| Tencent | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index f9996cbfb0b..07abd77fed0 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -519,6 +519,33 @@ Possible values:
Default value: `1`.
+## allow_settings_after_format_in_insert {#allow_settings_after_format_in_insert}
+
+Control whether `SETTINGS` after `FORMAT` in `INSERT` queries is allowed or not. It is not recommended to use this, since this may interpret part of `SETTINGS` as values.
+
+Example:
+
+```sql
+INSERT INTO FUNCTION null('foo String') SETTINGS max_threads=1 VALUES ('bar');
+```
+
+But the following query will work only with `allow_settings_after_format_in_insert`:
+
+```sql
+SET allow_settings_after_format_in_insert=1;
+INSERT INTO FUNCTION null('foo String') VALUES ('bar') SETTINGS max_threads=1;
+```
+
+Possible values:
+
+- 0 — Disallow.
+- 1 — Allow.
+
+Default value: `0`.
+
+!!! note "Warning"
+ Use this setting only for backward compatibility if your use cases depend on old syntax.
+
## input_format_skip_unknown_fields {#settings-input-format-skip-unknown-fields}
Enables or disables skipping insertion of extra data.
diff --git a/programs/format/Format.cpp b/programs/format/Format.cpp
index 835afcdb2ed..50d85cdd43d 100644
--- a/programs/format/Format.cpp
+++ b/programs/format/Format.cpp
@@ -54,6 +54,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
("multiquery,n", "allow multiple queries in the same file")
("obfuscate", "obfuscate instead of formatting")
("backslash", "add a backslash at the end of each line of the formatted query")
+ ("allow_settings_after_format_in_insert", "Allow SETTINGS after FORMAT, but note that this is not always safe")
("seed", po::value(), "seed (arbitrary string) that determines the result of obfuscation")
;
@@ -83,6 +84,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
bool multiple = options.count("multiquery");
bool obfuscate = options.count("obfuscate");
bool backslash = options.count("backslash");
+ bool allow_settings_after_format_in_insert = options.count("allow_settings_after_format_in_insert");
if (quiet && (hilite || oneline || obfuscate))
{
@@ -154,7 +156,7 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
const char * pos = query.data();
const char * end = pos + query.size();
- ParserQuery parser(end);
+ ParserQuery parser(end, allow_settings_after_format_in_insert);
do
{
ASTPtr res = parseQueryAndMovePosition(
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 0b5a7724fe5..fc9187cb622 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -1639,6 +1639,8 @@ int Server::main(const std::vector & /*args*/)
server.start();
LOG_INFO(log, "Listening for {}", server.getDescription());
}
+
+ global_context->setServerCompletelyStarted();
LOG_INFO(log, "Ready for connections.");
}
diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h
index fb5eafbe679..accfa0ad33d 100644
--- a/src/Access/Common/AccessType.h
+++ b/src/Access/Common/AccessType.h
@@ -102,6 +102,7 @@ enum class AccessType
\
M(KILL_QUERY, "", GLOBAL, ALL) /* allows to kill a query started by another user
(anyone can kill his own queries) */\
+ M(KILL_TRANSACTION, "", GLOBAL, ALL) \
\
M(MOVE_PARTITION_BETWEEN_SHARDS, "", GLOBAL, ALL) /* required to be able to move a part/partition to a table
identified by its ZooKeeper path */\
diff --git a/src/Backups/ArchiveBackup.cpp b/src/Backups/ArchiveBackup.cpp
index 5d4837fff2e..0c4b0c3cd40 100644
--- a/src/Backups/ArchiveBackup.cpp
+++ b/src/Backups/ArchiveBackup.cpp
@@ -42,6 +42,14 @@ void ArchiveBackup::openImpl(OpenMode open_mode_)
/// mutex is already locked
if (open_mode_ == OpenMode::WRITE)
{
+ /// Create a directory to contain the archive.
+ auto dir_path = fs::path(path).parent_path();
+ if (disk)
+ disk->createDirectories(dir_path);
+ else
+ std::filesystem::create_directories(dir_path);
+
+ /// Start writing the archive.
if (disk)
writer = createArchiveWriter(path, disk->writeFile(path));
else
@@ -65,7 +73,7 @@ void ArchiveBackup::openImpl(OpenMode open_mode_)
}
}
-void ArchiveBackup::closeImpl(bool writing_finalized_)
+void ArchiveBackup::closeImpl(const Strings &, bool writing_finalized_)
{
/// mutex is already locked
if (writer && writer->isWritingFile())
diff --git a/src/Backups/ArchiveBackup.h b/src/Backups/ArchiveBackup.h
index 9649c0c1843..d947fa16beb 100644
--- a/src/Backups/ArchiveBackup.h
+++ b/src/Backups/ArchiveBackup.h
@@ -35,7 +35,7 @@ public:
private:
bool backupExists() const override;
void openImpl(OpenMode open_mode_) override;
- void closeImpl(bool writing_finalized_) override;
+ void closeImpl(const Strings & written_files_, bool writing_finalized_) override;
bool supportsWritingInMultipleThreads() const override { return false; }
std::unique_ptr readFileImpl(const String & file_name) const override;
std::unique_ptr writeFileImpl(const String & file_name) override;
diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp
index e4fc894837a..21300f2dbae 100644
--- a/src/Backups/BackupImpl.cpp
+++ b/src/Backups/BackupImpl.cpp
@@ -107,6 +107,7 @@ void BackupImpl::open(OpenMode open_mode_)
timestamp = std::time(nullptr);
uuid = UUIDHelpers::generateV4();
writing_finalized = false;
+ written_files.clear();
}
if (open_mode_ == OpenMode::READ)
@@ -145,7 +146,7 @@ void BackupImpl::close()
if (open_mode == OpenMode::NONE)
return;
- closeImpl(writing_finalized);
+ closeImpl(written_files, writing_finalized);
uuid = UUIDHelpers::Nil;
timestamp = 0;
@@ -202,9 +203,12 @@ void BackupImpl::writeBackupMetadata()
config->setString(prefix + "checksum", getHexUIntLowercase(info.checksum));
if (info.base_size)
{
- config->setUInt(prefix + "base_size", info.base_size);
- if (info.base_checksum != info.checksum)
+ config->setBool(prefix + "use_base", true);
+ if (info.base_size != info.size)
+ {
+ config->setUInt(prefix + "base_size", info.base_size);
config->setString(prefix + "base_checksum", getHexUIntLowercase(info.base_checksum));
+ }
}
}
++index;
@@ -213,6 +217,7 @@ void BackupImpl::writeBackupMetadata()
std::ostringstream stream; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
config->save(stream);
String str = stream.str();
+ written_files.push_back(".backup");
auto out = writeFileImpl(".backup");
out->write(str.data(), str.size());
}
@@ -253,13 +258,14 @@ void BackupImpl::readBackupMetadata()
if (info.size)
{
info.checksum = unhexChecksum(config->getString(prefix + "checksum"));
- info.base_size = config->getUInt(prefix + "base_size", 0);
+ bool use_base = config->getBool(prefix + "use_base", false);
+ info.base_size = config->getUInt(prefix + "base_size", use_base ? info.size : 0);
if (info.base_size)
{
- if (config->has(prefix + "base_checksum"))
- info.base_checksum = unhexChecksum(config->getString(prefix + "base_checksum"));
- else
+ if (info.base_size == info.size)
info.base_checksum = info.checksum;
+ else
+ info.base_checksum = unhexChecksum(config->getString(prefix + "base_checksum"));
}
}
file_infos.emplace(name, info);
@@ -345,11 +351,6 @@ BackupEntryPtr BackupImpl::readFile(const String & file_name) const
return std::make_unique(nullptr, 0, UInt128{0, 0});
}
- auto read_callback = [backup = std::static_pointer_cast(shared_from_this()), file_name]()
- {
- return backup->readFileImpl(file_name);
- };
-
if (!info.base_size)
{
/// Data goes completely from this backup, the base backup isn't used.
@@ -526,6 +527,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry)
}
/// Copy the entry's data after `copy_pos`.
+ written_files.push_back(file_name);
auto out = writeFileImpl(file_name);
copyData(*read_buffer, *out);
diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h
index d1fc3c3248c..597b025d0ef 100644
--- a/src/Backups/BackupImpl.h
+++ b/src/Backups/BackupImpl.h
@@ -47,7 +47,7 @@ protected:
virtual void openImpl(OpenMode open_mode_) = 0;
OpenMode getOpenModeNoLock() const { return open_mode; }
- virtual void closeImpl(bool writing_finalized_) = 0;
+ virtual void closeImpl(const Strings & written_files_, bool writing_finalized_) = 0;
/// Read a file from the backup.
/// Low level: the function doesn't check base backup or checksums.
@@ -86,6 +86,7 @@ private:
std::optional base_backup_uuid;
std::map file_infos; /// Should be ordered alphabetically, see listFiles().
std::unordered_map file_checksums;
+ Strings written_files;
bool writing_finalized = false;
};
diff --git a/src/Backups/BackupInfo.cpp b/src/Backups/BackupInfo.cpp
index ba953b818c2..cab08e306d6 100644
--- a/src/Backups/BackupInfo.cpp
+++ b/src/Backups/BackupInfo.cpp
@@ -1,6 +1,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -23,7 +24,11 @@ String BackupInfo::toString() const
auto list = std::make_shared();
func->arguments = list;
func->children.push_back(list);
- list->children.reserve(args.size());
+ list->children.reserve(args.size() + !id_arg.empty());
+
+ if (!id_arg.empty())
+ list->children.push_back(std::make_shared(id_arg));
+
for (const auto & arg : args)
list->children.push_back(std::make_shared(arg));
@@ -53,9 +58,22 @@ BackupInfo BackupInfo::fromAST(const IAST & ast)
const auto * list = func->arguments->as();
if (!list)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected list, got {}", serializeAST(*func->arguments));
- res.args.reserve(list->children.size());
- for (const auto & elem : list->children)
+
+ size_t index = 0;
+ if (!list->children.empty())
{
+ const auto * id = list->children[0]->as();
+ if (id)
+ {
+ res.id_arg = id->name();
+ ++index;
+ }
+ }
+
+ res.args.reserve(list->children.size() - index);
+ for (; index < list->children.size(); ++index)
+ {
+ const auto & elem = list->children[index];
const auto * lit = elem->as();
if (!lit)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected literal, got {}", serializeAST(*elem));
diff --git a/src/Backups/BackupInfo.h b/src/Backups/BackupInfo.h
index 9b7d03c6d6a..5b5c676ecf1 100644
--- a/src/Backups/BackupInfo.h
+++ b/src/Backups/BackupInfo.h
@@ -11,6 +11,7 @@ class IAST;
struct BackupInfo
{
String backup_engine_name;
+ String id_arg;
std::vector args;
String toString() const;
diff --git a/src/Backups/DirectoryBackup.cpp b/src/Backups/DirectoryBackup.cpp
index dc4d098dbe9..0deb41c200d 100644
--- a/src/Backups/DirectoryBackup.cpp
+++ b/src/Backups/DirectoryBackup.cpp
@@ -1,16 +1,9 @@
#include
-#include
-#include
#include
namespace DB
{
-namespace ErrorCodes
-{
- extern const int BAD_ARGUMENTS;
-}
-
DirectoryBackup::DirectoryBackup(
const String & backup_name_,
@@ -19,23 +12,16 @@ DirectoryBackup::DirectoryBackup(
const ContextPtr & context_,
const std::optional & base_backup_info_)
: BackupImpl(backup_name_, context_, base_backup_info_)
- , disk(disk_), path(path_)
+ , disk(disk_)
{
- /// Path to backup must end with '/'
- if (!path.ends_with("/"))
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to backup must end with '/', but {} doesn't.", getName(), quoteString(path));
- dir_path = fs::path(path).parent_path(); /// get path without terminating slash
+ /// Remove terminating slash.
+ path = (std::filesystem::path(path_) / "").parent_path();
/// If `disk` is not specified, we create an internal instance of `DiskLocal` here.
if (!disk)
{
- auto fspath = fs::path{dir_path};
- if (!fspath.has_filename())
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Backup {}: Path to a backup must be a directory path.", getName(), quoteString(path));
- path = fspath.filename() / "";
- dir_path = fs::path(path).parent_path(); /// get path without terminating slash
- String disk_path = fspath.remove_filename();
- disk = std::make_shared(disk_path, disk_path, 0);
+ disk = std::make_shared(path, path, 0);
+ path = ".";
}
}
@@ -47,34 +33,38 @@ DirectoryBackup::~DirectoryBackup()
bool DirectoryBackup::backupExists() const
{
- return disk->isDirectory(dir_path);
+ return disk->isDirectory(path);
}
void DirectoryBackup::openImpl(OpenMode open_mode_)
{
if (open_mode_ == OpenMode::WRITE)
- disk->createDirectories(dir_path);
+ disk->createDirectories(path);
}
-void DirectoryBackup::closeImpl(bool writing_finalized_)
+void DirectoryBackup::closeImpl(const Strings & written_files_, bool writing_finalized_)
{
- if ((getOpenModeNoLock() == OpenMode::WRITE) && !writing_finalized_ && disk->isDirectory(dir_path))
+ if ((getOpenModeNoLock() == OpenMode::WRITE) && !writing_finalized_ && !written_files_.empty())
{
/// Creating of the backup wasn't finished correctly,
/// so the backup cannot be used and it's better to remove its files.
- disk->removeRecursive(dir_path);
+ const auto & files_to_delete = written_files_;
+ for (const String & file_name : files_to_delete)
+ disk->removeFileIfExists(path / file_name);
+ if (disk->isDirectory(path) && disk->isDirectoryEmpty(path))
+ disk->removeDirectory(path);
}
}
std::unique_ptr DirectoryBackup::readFileImpl(const String & file_name) const
{
- String file_path = path + file_name;
+ auto file_path = path / file_name;
return disk->readFile(file_path);
}
std::unique_ptr DirectoryBackup::writeFileImpl(const String & file_name)
{
- String file_path = path + file_name;
+ auto file_path = path / file_name;
disk->createDirectories(fs::path(file_path).parent_path());
return disk->writeFile(file_path);
}
diff --git a/src/Backups/DirectoryBackup.h b/src/Backups/DirectoryBackup.h
index 7d9b5cc4557..499a1893dca 100644
--- a/src/Backups/DirectoryBackup.h
+++ b/src/Backups/DirectoryBackup.h
@@ -1,6 +1,7 @@
#pragma once
#include
+#include
namespace DB
@@ -25,13 +26,12 @@ public:
private:
bool backupExists() const override;
void openImpl(OpenMode open_mode_) override;
- void closeImpl(bool writing_finalized_) override;
+ void closeImpl(const Strings & written_files_, bool writing_finalized_) override;
std::unique_ptr readFileImpl(const String & file_name) const override;
std::unique_ptr writeFileImpl(const String & file_name) override;
DiskPtr disk;
- String path;
- String dir_path; /// `path` without terminating slash
+ std::filesystem::path path;
};
}
diff --git a/src/Backups/registerBackupEnginesFileAndDisk.cpp b/src/Backups/registerBackupEnginesFileAndDisk.cpp
index 6a34d67115d..e3b06a21d96 100644
--- a/src/Backups/registerBackupEnginesFileAndDisk.cpp
+++ b/src/Backups/registerBackupEnginesFileAndDisk.cpp
@@ -2,6 +2,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -13,8 +14,9 @@ namespace DB
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
- extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int INVALID_CONFIG_PARAMETER;
+ extern const int LOGICAL_ERROR;
+ extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
@@ -22,83 +24,70 @@ namespace
{
namespace fs = std::filesystem;
- [[noreturn]] void throwDiskIsAllowed(const String & disk_name)
- {
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} is not allowed for backups", disk_name);
- }
-
- [[noreturn]] void throwPathNotAllowed(const fs::path & path)
- {
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} is not allowed for backups", quoteString(String{path}));
- }
-
- void checkAllowedPathInConfigIsValid(const String & key, const fs::path & value)
- {
- if (value.empty() || value.is_relative())
- throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Configuration parameter {} has a wrong value {}", key, String{value});
- }
-
- /// Checks that a disk name and a path specified as parameters of Disk() are valid.
- void checkDiskNameAndPath(const String & disk_name, fs::path & path, const Poco::Util::AbstractConfiguration & config)
+ /// Checks that a disk name specified as a parameter of Disk() is valid.
+ void checkDiskName(const String & disk_name, const Poco::Util::AbstractConfiguration & config)
{
String key = "backups.allowed_disk";
- bool disk_name_found = false;
+ if (!config.has(key))
+ throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "The \"backups.allowed_disk\" configuration parameter is not set, cannot use Disk() backup engine");
+
size_t counter = 0;
- while (config.has(key))
+ while (config.getString(key) != disk_name)
{
- if (config.getString(key) == disk_name)
- {
- disk_name_found = true;
- break;
- }
key = "backups.allowed_disk[" + std::to_string(++counter) + "]";
+ if (!config.has(key))
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk {} is not allowed for backups, see the \"backups.allowed_disk\" configuration parameter", disk_name);
}
-
- if (!disk_name_found)
- throwDiskIsAllowed(disk_name);
-
- path = path.lexically_normal();
- if (!path.is_relative() || path.empty() || (*path.begin() == ".."))
- throwPathNotAllowed(path);
}
- /// Checks that a path specified as a parameter of File() is valid.
- void checkPath(fs::path & path, const Poco::Util::AbstractConfiguration & config)
+ /// Checks that a path specified as a parameter of Disk() is valid.
+ void checkPath(const String & disk_name, const DiskPtr & disk, fs::path & path)
{
- String key = "backups.allowed_path";
+ path = path.lexically_normal();
+ if (!path.is_relative() && (disk->getType() == DiskType::Local))
+ path = path.lexically_proximate(disk->getPath());
+ bool path_ok = path.empty() || (path.is_relative() && (*path.begin() != ".."));
+ if (!path_ok)
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} to backup must be inside the specified disk {}", quoteString(path.c_str()), disk_name);
+ }
+
+ /// Checks that a path specified as a parameter of File() is valid.
+ void checkPath(fs::path & path, const Poco::Util::AbstractConfiguration & config, const fs::path & data_dir) {
path = path.lexically_normal();
if (path.empty())
- throwPathNotAllowed(path);
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to backup must not be empty");
+
+ String key = "backups.allowed_path";
+ if (!config.has(key))
+ throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER,
+ "The \"backups.allowed_path\" configuration parameter is not set, cannot use File() backup engine");
if (path.is_relative())
{
- if (*path.begin() == "..")
- throwPathNotAllowed(path);
+ auto first_allowed_path = fs::path(config.getString(key));
+ if (first_allowed_path.is_relative())
+ first_allowed_path = data_dir / first_allowed_path;
- auto base = fs::path(config.getString(key, ""));
- checkAllowedPathInConfigIsValid(key, base);
- path = base / path;
- return;
+ path = first_allowed_path / path;
}
- bool path_found_in_config = false;
size_t counter = 0;
- while (config.has(key))
+ while (true)
{
- auto base = fs::path(config.getString(key));
- checkAllowedPathInConfigIsValid(key, base);
- auto rel = path.lexically_relative(base);
- if (!rel.empty() && (*rel.begin() != ".."))
- {
- path_found_in_config = true;
+ auto allowed_path = fs::path(config.getString(key));
+ if (allowed_path.is_relative())
+ allowed_path = data_dir / allowed_path;
+ auto rel = path.lexically_proximate(allowed_path);
+ bool path_ok = rel.empty() || (rel.is_relative() && (*rel.begin() != ".."));
+ if (path_ok)
break;
- }
key = "backups.allowed_path[" + std::to_string(++counter) + "]";
+ if (!config.has(key))
+ throw Exception(ErrorCodes::BAD_ARGUMENTS,
+ "Path {} is not allowed for backups, see the \"backups.allowed_path\" configuration parameter",
+ quoteString(path.c_str()));
}
-
- if (!path_found_in_config)
- throwPathNotAllowed(path);
}
}
@@ -109,6 +98,15 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
{
String backup_name = params.backup_info.toString();
const String & engine_name = params.backup_info.backup_engine_name;
+
+ if (!params.backup_info.id_arg.empty())
+ {
+ throw Exception(
+ ErrorCodes::BAD_ARGUMENTS,
+ "Backup engine '{}' requires first argument to be a string",
+ engine_name);
+ }
+
const auto & args = params.backup_info.args;
DiskPtr disk;
@@ -123,7 +121,9 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
}
path = args[0].safeGet();
- checkPath(path, params.context->getConfigRef());
+ const auto & config = params.context->getConfigRef();
+ const auto & data_dir = params.context->getPath();
+ checkPath(path, config, data_dir);
}
else if (engine_name == "Disk")
{
@@ -135,30 +135,28 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
}
String disk_name = args[0].safeGet();
+ const auto & config = params.context->getConfigRef();
+ checkDiskName(disk_name, config);
path = args[1].safeGet();
- checkDiskNameAndPath(disk_name, path, params.context->getConfigRef());
disk = params.context->getDisk(disk_name);
+ checkPath(disk_name, disk, path);
}
+ else
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected backup engine '{}'", engine_name);
- std::unique_ptr backup;
-
- if (!path.has_filename() && !path.empty())
- {
- if (!params.password.empty())
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted");
- backup = std::make_unique(backup_name, disk, path, params.context, params.base_backup_info);
- }
- else if (hasRegisteredArchiveFileExtension(path))
+ if (hasRegisteredArchiveFileExtension(path))
{
auto archive_backup = std::make_unique(backup_name, disk, path, params.context, params.base_backup_info);
archive_backup->setCompression(params.compression_method, params.compression_level);
archive_backup->setPassword(params.password);
- backup = std::move(archive_backup);
+ return archive_backup;
}
else
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path to backup must be either a directory or a path to an archive");
-
- return backup;
+ {
+ if (!params.password.empty())
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted");
+ return std::make_unique(backup_name, disk, path, params.context, params.base_backup_info);
+ }
};
factory.registerBackupEngine("File", creator_fn);
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index 93ed1780e3b..9092cb16663 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -275,7 +275,7 @@ void ClientBase::setupSignalHandler()
ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const
{
- ParserQuery parser(end);
+ ParserQuery parser(end, global_context->getSettings().allow_settings_after_format_in_insert);
ASTPtr res;
const auto & settings = global_context->getSettingsRef();
@@ -1129,7 +1129,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
sendDataFromPipe(
storage->read(
sample.getNames(),
- storage->getStorageSnapshot(metadata),
+ storage->getStorageSnapshot(metadata, global_context),
query_info,
global_context,
{},
diff --git a/src/Common/CurrentThread.h b/src/Common/CurrentThread.h
index 9dbe8d355d6..4888adb511a 100644
--- a/src/Common/CurrentThread.h
+++ b/src/Common/CurrentThread.h
@@ -91,6 +91,7 @@ public:
struct QueryScope
{
explicit QueryScope(ContextMutablePtr query_context);
+ explicit QueryScope(ContextPtr query_context);
~QueryScope();
void logPeakMemoryUsage();
diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp
index 2e60e125d73..3097af6207c 100644
--- a/src/Common/ErrorCodes.cpp
+++ b/src/Common/ErrorCodes.cpp
@@ -617,6 +617,8 @@
M(646, CANNOT_BACKUP_DATABASE) \
M(647, CANNOT_BACKUP_TABLE) \
M(648, WRONG_DDL_RENAMING_SETTINGS) \
+ M(649, INVALID_TRANSACTION) \
+ M(650, SERIALIZATION_ERROR) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \
diff --git a/src/Common/FileCache.cpp b/src/Common/FileCache.cpp
index d648267b95d..05d32f5ffe4 100644
--- a/src/Common/FileCache.cpp
+++ b/src/Common/FileCache.cpp
@@ -3,6 +3,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -31,13 +32,11 @@ namespace
IFileCache::IFileCache(
const String & cache_base_path_,
- size_t max_size_,
- size_t max_element_size_,
- size_t max_file_segment_size_)
+ const FileCacheSettings & cache_settings_)
: cache_base_path(cache_base_path_)
- , max_size(max_size_)
- , max_element_size(max_element_size_)
- , max_file_segment_size(max_file_segment_size_)
+ , max_size(cache_settings_.max_size)
+ , max_element_size(cache_settings_.max_elements)
+ , max_file_segment_size(cache_settings_.max_file_segment_size)
{
}
@@ -58,7 +57,7 @@ String IFileCache::getPathInLocalCache(const Key & key)
return fs::path(cache_base_path) / key_str.substr(0, 3) / key_str;
}
-bool IFileCache::shouldBypassCache()
+bool IFileCache::isReadOnly()
{
return !CurrentThread::isInitialized()
|| !CurrentThread::get().getQueryContext()
@@ -71,8 +70,8 @@ void IFileCache::assertInitialized() const
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cache not initialized");
}
-LRUFileCache::LRUFileCache(const String & cache_base_path_, size_t max_size_, size_t max_element_size_, size_t max_file_segment_size_)
- : IFileCache(cache_base_path_, max_size_, max_element_size_, max_file_segment_size_)
+LRUFileCache::LRUFileCache(const String & cache_base_path_, const FileCacheSettings & cache_settings_)
+ : IFileCache(cache_base_path_, cache_settings_)
, log(&Poco::Logger::get("LRUFileCache"))
{
}
@@ -205,8 +204,8 @@ FileSegments LRUFileCache::getImpl(
return result;
}
-FileSegments LRUFileCache::splitRangeIntoEmptyCells(
- const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock)
+FileSegments LRUFileCache::splitRangeIntoCells(
+ const Key & key, size_t offset, size_t size, FileSegment::State state, std::lock_guard & cache_lock)
{
assert(size > 0);
@@ -222,9 +221,10 @@ FileSegments LRUFileCache::splitRangeIntoEmptyCells(
current_cell_size = std::min(remaining_size, max_file_segment_size);
remaining_size -= current_cell_size;
- auto * cell = addCell(key, current_pos, current_cell_size, FileSegment::State::EMPTY, cache_lock);
+ auto * cell = addCell(key, current_pos, current_cell_size, state, cache_lock);
if (cell)
file_segments.push_back(cell->file_segment);
+ assert(cell);
current_pos += current_cell_size;
}
@@ -250,7 +250,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t
if (file_segments.empty())
{
- file_segments = splitRangeIntoEmptyCells(key, offset, size, cache_lock);
+ file_segments = splitRangeIntoCells(key, offset, size, FileSegment::State::EMPTY, cache_lock);
}
else
{
@@ -295,7 +295,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t
assert(current_pos < segment_range.left);
auto hole_size = segment_range.left - current_pos;
- file_segments.splice(it, splitRangeIntoEmptyCells(key, current_pos, hole_size, cache_lock));
+ file_segments.splice(it, splitRangeIntoCells(key, current_pos, hole_size, FileSegment::State::EMPTY, cache_lock));
current_pos = segment_range.right + 1;
++it;
@@ -309,7 +309,7 @@ FileSegmentsHolder LRUFileCache::getOrSet(const Key & key, size_t offset, size_t
/// segmentN
auto hole_size = range.right - current_pos + 1;
- file_segments.splice(file_segments.end(), splitRangeIntoEmptyCells(key, current_pos, hole_size, cache_lock));
+ file_segments.splice(file_segments.end(), splitRangeIntoCells(key, current_pos, hole_size, FileSegment::State::EMPTY, cache_lock));
}
}
@@ -354,6 +354,21 @@ LRUFileCache::FileSegmentCell * LRUFileCache::addCell(
return &(it->second);
}
+FileSegmentsHolder LRUFileCache::setDownloading(const Key & key, size_t offset, size_t size)
+{
+ std::lock_guard cache_lock(mutex);
+
+ auto * cell = getCell(key, offset, cache_lock);
+ if (cell)
+ throw Exception(
+ ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Cache cell already exists for key `{}` and offset {}",
+ keyToStr(key), offset);
+
+ auto file_segments = splitRangeIntoCells(key, offset, size, FileSegment::State::DOWNLOADING, cache_lock);
+ return FileSegmentsHolder(std::move(file_segments));
+}
+
bool LRUFileCache::tryReserve(
const Key & key_, size_t offset_, size_t size, std::lock_guard & cache_lock)
{
@@ -372,7 +387,8 @@ bool LRUFileCache::tryReserve(
auto is_overflow = [&]
{
- return (current_size + size - removed_size > max_size)
+ /// max_size == 0 means unlimited cache size, max_element_size == 0 means unlimited number of cache elements.
+ return (max_size != 0 && current_size + size - removed_size > max_size)
|| (max_element_size != 0 && queue_size > max_element_size);
};
@@ -484,6 +500,30 @@ void LRUFileCache::remove(const Key & key)
fs::remove(key_path);
}
+void LRUFileCache::tryRemoveAll()
+{
+ /// Try remove all cached files by cache_base_path.
+ /// Only releasable file segments are evicted.
+
+ std::lock_guard cache_lock(mutex);
+
+ for (auto it = queue.begin(); it != queue.end();)
+ {
+ auto & [key, offset] = *it++;
+
+ auto * cell = getCell(key, offset, cache_lock);
+ if (cell->releasable())
+ {
+ auto file_segment = cell->file_segment;
+ if (file_segment)
+ {
+ std::lock_guard segment_lock(file_segment->mutex);
+ remove(file_segment->key(), file_segment->offset(), cache_lock, segment_lock);
+ }
+ }
+ }
+}
+
void LRUFileCache::remove(
Key key, size_t offset,
std::lock_guard & cache_lock, std::lock_guard & /* segment_lock */)
@@ -668,6 +708,38 @@ bool LRUFileCache::isLastFileSegmentHolder(
return cell->file_segment.use_count() == 2;
}
+FileSegments LRUFileCache::getSnapshot() const
+{
+ std::lock_guard cache_lock(mutex);
+
+ FileSegments file_segments;
+
+ for (const auto & [key, cells_by_offset] : files)
+ {
+ for (const auto & [offset, cell] : cells_by_offset)
+ file_segments.push_back(FileSegment::getSnapshot(cell.file_segment, cache_lock));
+ }
+
+ return file_segments;
+}
+
+std::vector LRUFileCache::tryGetCachePaths(const Key & key)
+{
+ std::lock_guard cache_lock(mutex);
+
+ std::vector cache_paths;
+
+ const auto & cells_by_offset = files[key];
+
+ for (const auto & [offset, cell] : cells_by_offset)
+ {
+ if (cell.file_segment->state() == FileSegment::State::DOWNLOADED)
+ cache_paths.push_back(getPathInLocalCache(key, offset));
+ }
+
+ return cache_paths;
+}
+
LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRUQueue & queue_)
: file_segment(file_segment_)
{
@@ -685,12 +757,13 @@ LRUFileCache::FileSegmentCell::FileSegmentCell(FileSegmentPtr file_segment_, LRU
break;
}
case FileSegment::State::EMPTY:
+ case FileSegment::State::DOWNLOADING:
{
break;
}
default:
throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
- "Can create cell with either DOWNLOADED or EMPTY state, got: {}",
+ "Can create cell with either EMPTY, DOWNLOADED, DOWNLOADING state, got: {}",
FileSegment::stateToString(file_segment->download_state));
}
}
diff --git a/src/Common/FileCache.h b/src/Common/FileCache.h
index d58711cef0a..e706376bc89 100644
--- a/src/Common/FileCache.h
+++ b/src/Common/FileCache.h
@@ -33,9 +33,7 @@ public:
IFileCache(
const String & cache_base_path_,
- size_t max_size_,
- size_t max_element_size_,
- size_t max_file_segment_size_);
+ const FileCacheSettings & cache_settings_);
virtual ~IFileCache() = default;
@@ -44,7 +42,9 @@ public:
virtual void remove(const Key & key) = 0;
- static bool shouldBypassCache();
+ virtual void tryRemoveAll() = 0;
+
+ static bool isReadOnly();
/// Cache capacity in bytes.
size_t capacity() const { return max_size; }
@@ -55,6 +55,10 @@ public:
String getPathInLocalCache(const Key & key);
+ const String & getBasePath() const { return cache_base_path; }
+
+ virtual std::vector tryGetCachePaths(const Key & key) = 0;
+
/**
* Given an `offset` and `size` representing [offset, offset + size) bytes interval,
* return list of cached non-overlapping non-empty
@@ -68,6 +72,10 @@ public:
*/
virtual FileSegmentsHolder getOrSet(const Key & key, size_t offset, size_t size) = 0;
+ virtual FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) = 0;
+
+ virtual FileSegments getSnapshot() const = 0;
+
/// For debug.
virtual String dumpStructure(const Key & key) = 0;
@@ -112,16 +120,22 @@ class LRUFileCache final : public IFileCache
public:
LRUFileCache(
const String & cache_base_path_,
- size_t max_size_,
- size_t max_element_size_ = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS,
- size_t max_file_segment_size_ = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE);
+ const FileCacheSettings & cache_settings_);
FileSegmentsHolder getOrSet(const Key & key, size_t offset, size_t size) override;
+ FileSegments getSnapshot() const override;
+
+ FileSegmentsHolder setDownloading(const Key & key, size_t offset, size_t size) override;
+
void initialize() override;
void remove(const Key & key) override;
+ void tryRemoveAll() override;
+
+ std::vector tryGetCachePaths(const Key & key) override;
+
private:
using FileKeyAndOffset = std::pair;
using LRUQueue = std::list;
@@ -194,8 +208,8 @@ private:
void loadCacheInfoIntoMemory();
- FileSegments splitRangeIntoEmptyCells(
- const Key & key, size_t offset, size_t size, std::lock_guard & cache_lock);
+ FileSegments splitRangeIntoCells(
+ const Key & key, size_t offset, size_t size, FileSegment::State state, std::lock_guard & cache_lock);
String dumpStructureImpl(const Key & key_, std::lock_guard & cache_lock);
diff --git a/src/Common/FileCacheFactory.cpp b/src/Common/FileCacheFactory.cpp
index fc8dff0b26c..9eadea05547 100644
--- a/src/Common/FileCacheFactory.cpp
+++ b/src/Common/FileCacheFactory.cpp
@@ -15,28 +15,53 @@ FileCacheFactory & FileCacheFactory::instance()
return ret;
}
-FileCachePtr FileCacheFactory::getImpl(const std::string & cache_base_path, std::lock_guard &)
+FileCacheFactory::CacheByBasePath FileCacheFactory::getAll()
+{
+ std::lock_guard lock(mutex);
+ return caches;
+}
+
+const FileCacheSettings & FileCacheFactory::getSettings(const std::string & cache_base_path)
+{
+ std::lock_guard lock(mutex);
+
+ auto * cache_data = getImpl(cache_base_path, lock);
+ if (cache_data)
+ return cache_data->settings;
+
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
+}
+
+FileCacheFactory::CacheData * FileCacheFactory::getImpl(const std::string & cache_base_path, std::lock_guard &)
{
auto it = caches.find(cache_base_path);
if (it == caches.end())
return nullptr;
- return it->second;
+ return &it->second;
+}
+
+FileCachePtr FileCacheFactory::get(const std::string & cache_base_path)
+{
+ std::lock_guard lock(mutex);
+
+ auto * cache_data = getImpl(cache_base_path, lock);
+ if (cache_data)
+ return cache_data->cache;
+
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "No cache found by path: {}", cache_base_path);
}
FileCachePtr FileCacheFactory::getOrCreate(
- const std::string & cache_base_path, size_t max_size, size_t max_elements_size, size_t max_file_segment_size)
+ const std::string & cache_base_path, const FileCacheSettings & file_cache_settings)
{
std::lock_guard lock(mutex);
- auto cache = getImpl(cache_base_path, lock);
- if (cache)
- {
- if (cache->capacity() != max_size)
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cache with path `{}` already exists, but has different max size", cache_base_path);
- return cache;
- }
- cache = std::make_shared(cache_base_path, max_size, max_elements_size, max_file_segment_size);
- caches.emplace(cache_base_path, cache);
+ auto * cache_data = getImpl(cache_base_path, lock);
+ if (cache_data)
+ return cache_data->cache;
+
+ auto cache = std::make_shared(cache_base_path, file_cache_settings);
+ caches.emplace(cache_base_path, CacheData(cache, file_cache_settings));
return cache;
}
diff --git a/src/Common/FileCacheFactory.h b/src/Common/FileCacheFactory.h
index f2432f03cae..3518f487b6d 100644
--- a/src/Common/FileCacheFactory.h
+++ b/src/Common/FileCacheFactory.h
@@ -1,6 +1,7 @@
#pragma once
#include
+#include
#include
#include
@@ -14,16 +15,32 @@ namespace DB
*/
class FileCacheFactory final : private boost::noncopyable
{
+ struct CacheData
+ {
+ FileCachePtr cache;
+ FileCacheSettings settings;
+
+ CacheData(FileCachePtr cache_, const FileCacheSettings & settings_) : cache(cache_), settings(settings_) {}
+ };
+
+ using CacheByBasePath = std::unordered_map;
+
public:
static FileCacheFactory & instance();
- FileCachePtr getOrCreate(const std::string & cache_base_path, size_t max_size, size_t max_elements_size, size_t max_file_segment_size);
+ FileCachePtr getOrCreate(const std::string & cache_base_path, const FileCacheSettings & file_cache_settings);
+
+ FileCachePtr get(const std::string & cache_base_path);
+
+ CacheByBasePath getAll();
+
+ const FileCacheSettings & getSettings(const std::string & cache_base_path);
private:
- FileCachePtr getImpl(const std::string & cache_base_path, std::lock_guard &);
+ CacheData * getImpl(const std::string & cache_base_path, std::lock_guard &);
std::mutex mutex;
- std::unordered_map caches;
+ CacheByBasePath caches;
};
}
diff --git a/src/Common/FileCacheSettings.cpp b/src/Common/FileCacheSettings.cpp
new file mode 100644
index 00000000000..f555de277b2
--- /dev/null
+++ b/src/Common/FileCacheSettings.cpp
@@ -0,0 +1,16 @@
+#include "FileCacheSettings.h"
+
+#include
+
+namespace DB
+{
+
+void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix)
+{
+ max_size = config.getUInt64(config_prefix + ".data_cache_max_size", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_CACHE_SIZE);
+ max_elements = config.getUInt64(config_prefix + ".data_cache_max_elements", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS);
+ max_file_segment_size = config.getUInt64(config_prefix + ".max_file_segment_size", REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE);
+ cache_on_write_operations = config.getUInt64(config_prefix + ".cache_on_write_operations", false);
+}
+
+}
diff --git a/src/Common/FileCacheSettings.h b/src/Common/FileCacheSettings.h
new file mode 100644
index 00000000000..0b34e1e3d82
--- /dev/null
+++ b/src/Common/FileCacheSettings.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include
+
+namespace Poco { namespace Util { class AbstractConfiguration; } }
+
+namespace DB
+{
+
+struct FileCacheSettings
+{
+ size_t max_size = 0;
+ size_t max_elements = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS;
+ size_t max_file_segment_size = REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE;
+ bool cache_on_write_operations = false;
+
+ void loadFromConfig(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix);
+};
+
+}
diff --git a/src/Common/FileCache_fwd.h b/src/Common/FileCache_fwd.h
index cab1525600b..7448f0c8c89 100644
--- a/src/Common/FileCache_fwd.h
+++ b/src/Common/FileCache_fwd.h
@@ -4,10 +4,13 @@
namespace DB
{
+static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_CACHE_SIZE = 1024 * 1024 * 1024;
static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_FILE_SEGMENT_SIZE = 100 * 1024 * 1024;
static constexpr int REMOTE_FS_OBJECTS_CACHE_DEFAULT_MAX_ELEMENTS = 1024 * 1024;
class IFileCache;
using FileCachePtr = std::shared_ptr;
+struct FileCacheSettings;
+
}
diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp
index ac89721683e..5a13ea7d207 100644
--- a/src/Common/FileSegment.cpp
+++ b/src/Common/FileSegment.cpp
@@ -31,10 +31,34 @@ FileSegment::FileSegment(
, log(&Poco::Logger::get("FileSegment"))
#endif
{
- if (download_state == State::DOWNLOADED)
- reserved_size = downloaded_size = size_;
- else if (download_state != State::EMPTY)
- throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Can create cell with either DOWNLOADED or EMPTY state");
+ /// On creation, file segment state can be EMPTY, DOWNLOADED, DOWNLOADING.
+ switch (download_state)
+ {
+ /// EMPTY is used when file segment is not in cache and
+ /// someone will _potentially_ want to download it (after calling getOrSetDownloader()).
+ case (State::EMPTY):
+ {
+ break;
+ }
+ /// DOWNLOADED is used either on initial cache metadata load into memory on server startup
+ /// or on reduceSizeToDownloaded() -- when file segment object is updated.
+ case (State::DOWNLOADED):
+ {
+ reserved_size = downloaded_size = size_;
+ break;
+ }
+ /// DOWNLOADING is used only for write-through caching (e.g. getOrSetDownloader() is not
+ /// needed, downloader is set on file segment creation).
+ case (State::DOWNLOADING):
+ {
+ downloader_id = getCallerId();
+ break;
+ }
+ default:
+ {
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Can create cell with either EMPTY, DOWNLOADED, DOWNLOADING state");
+ }
+ }
}
FileSegment::State FileSegment::state() const
@@ -49,6 +73,12 @@ size_t FileSegment::getDownloadOffset() const
return range().left + getDownloadedSize(segment_lock);
}
+size_t FileSegment::getDownloadedSize() const
+{
+ std::lock_guard segment_lock(mutex);
+ return getDownloadedSize(segment_lock);
+}
+
size_t FileSegment::getDownloadedSize(std::lock_guard & /* segment_lock */) const
{
if (download_state == State::DOWNLOADED)
@@ -60,24 +90,15 @@ size_t FileSegment::getDownloadedSize(std::lock_guard & /* segment_l
String FileSegment::getCallerId()
{
- return getCallerIdImpl(false);
+ return getCallerIdImpl();
}
-String FileSegment::getCallerIdImpl(bool allow_non_strict_checking)
+String FileSegment::getCallerIdImpl()
{
- if (IFileCache::shouldBypassCache())
- {
- /// getCallerId() can be called from completeImpl(), which can be called from complete().
- /// complete() is called from destructor of CachedReadBufferFromRemoteFS when there is no query id anymore.
- /// Allow non strict checking in this case. This works correctly as if getCallerIdImpl() is called from destructor,
- /// then we know that caller is not a downloader, because downloader is reset each nextImpl() call either
- /// manually or via SCOPE_EXIT.
-
- if (allow_non_strict_checking)
- return "None";
-
- throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cannot use cache without query id");
- }
+ if (!CurrentThread::isInitialized()
+ || !CurrentThread::get().getQueryContext()
+ || CurrentThread::getQueryId().size == 0)
+ return "None:" + toString(getThreadId());
return CurrentThread::getQueryId().toString() + ":" + toString(getThreadId());
}
@@ -136,7 +157,6 @@ String FileSegment::getDownloader() const
bool FileSegment::isDownloader() const
{
std::lock_guard segment_lock(mutex);
- LOG_TEST(log, "Checking for current downloader. Caller: {}, downloader: {}, current state: {}", getCallerId(), downloader_id, stateToString(download_state));
return getCallerId() == downloader_id;
}
@@ -221,15 +241,9 @@ void FileSegment::write(const char * from, size_t size, size_t offset_)
{
std::lock_guard segment_lock(mutex);
- auto info = getInfoForLogImpl(segment_lock);
- e.addMessage("while writing into cache, info: " + info);
+ wrapWithCacheInfo(e, "while writing into cache", segment_lock);
- LOG_ERROR(log, "Failed to write to cache. File segment info: {}", info);
-
- download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;
-
- cache_writer->finalize();
- cache_writer.reset();
+ setDownloadFailed(segment_lock);
cv.notify_all();
@@ -239,6 +253,77 @@ void FileSegment::write(const char * from, size_t size, size_t offset_)
assert(getDownloadOffset() == offset_ + size);
}
+void FileSegment::writeInMemory(const char * from, size_t size)
+{
+ if (!size)
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Attempt to write zero size cache file");
+
+ if (availableSize() < size)
+ throw Exception(
+ ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR,
+ "Not enough space is reserved. Available: {}, expected: {}", availableSize(), size);
+
+ std::lock_guard segment_lock(mutex);
+
+ if (cache_writer)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer already initialized");
+
+ auto download_path = cache->getPathInLocalCache(key(), offset());
+ cache_writer = std::make_unique(download_path, size + 1);
+
+ try
+ {
+ cache_writer->write(from, size);
+ }
+ catch (Exception & e)
+ {
+ wrapWithCacheInfo(e, "while writing into cache", segment_lock);
+
+ setDownloadFailed(segment_lock);
+
+ cv.notify_all();
+
+ throw;
+ }
+}
+
+size_t FileSegment::finalizeWrite()
+{
+ std::lock_guard segment_lock(mutex);
+
+ if (!cache_writer)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache writer not initialized");
+
+ size_t size = cache_writer->offset();
+
+ if (size == 0)
+ throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Writing size is not allowed");
+
+ try
+ {
+ cache_writer->next();
+ }
+ catch (Exception & e)
+ {
+ wrapWithCacheInfo(e, "while writing into cache", segment_lock);
+
+ setDownloadFailed(segment_lock);
+
+ cv.notify_all();
+
+ throw;
+ }
+
+ downloaded_size += size;
+
+ if (downloaded_size != range().size())
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected downloaded size to equal file segment size ({} == {})", downloaded_size, range().size());
+
+ setDownloaded(segment_lock);
+
+ return size;
+}
+
FileSegment::State FileSegment::wait()
{
std::unique_lock segment_lock(mutex);
@@ -303,6 +388,20 @@ void FileSegment::setDownloaded(std::lock_guard & /* segment_lock */
{
download_state = State::DOWNLOADED;
is_downloaded = true;
+ downloader_id.clear();
+
+ if (cache_writer)
+ {
+ cache_writer->finalize();
+ cache_writer.reset();
+ remote_file_reader.reset();
+ }
+}
+
+void FileSegment::setDownloadFailed(std::lock_guard & /* segment_lock */)
+{
+ download_state = State::PARTIALLY_DOWNLOADED_NO_CONTINUATION;
+ downloader_id.clear();
if (cache_writer)
{
@@ -360,7 +459,7 @@ void FileSegment::complete(State state)
}
catch (...)
{
- if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
+ if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
downloader_id.clear();
cv.notify_all();
@@ -385,7 +484,7 @@ void FileSegment::complete(std::lock_guard & cache_lock)
/// Segment state can be changed from DOWNLOADING or EMPTY only if the caller is the
/// downloader or the only owner of the segment.
- bool can_update_segment_state = downloader_id == getCallerIdImpl(true)
+ bool can_update_segment_state = downloader_id == getCallerIdImpl()
|| cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
if (can_update_segment_state)
@@ -394,11 +493,11 @@ void FileSegment::complete(std::lock_guard & cache_lock)
try
{
- completeImpl(cache_lock, segment_lock, /* allow_non_strict_checking */true);
+ completeImpl(cache_lock, segment_lock);
}
catch (...)
{
- if (!downloader_id.empty() && downloader_id == getCallerIdImpl(true))
+ if (!downloader_id.empty() && downloader_id == getCallerIdImpl())
downloader_id.clear();
cv.notify_all();
@@ -408,7 +507,7 @@ void FileSegment::complete(std::lock_guard & cache_lock)
cv.notify_all();
}
-void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock, bool allow_non_strict_checking)
+void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lock_guard & segment_lock)
{
bool is_last_holder = cache->isLastFileSegmentHolder(key(), offset(), cache_lock, segment_lock);
@@ -444,7 +543,7 @@ void FileSegment::completeImpl(std::lock_guard & cache_lock, std::lo
}
}
- if (!downloader_id.empty() && (downloader_id == getCallerIdImpl(allow_non_strict_checking) || is_last_holder))
+ if (!downloader_id.empty() && (downloader_id == getCallerIdImpl() || is_last_holder))
{
LOG_TEST(log, "Clearing downloader id: {}, current state: {}", downloader_id, stateToString(download_state));
downloader_id.clear();
@@ -471,6 +570,11 @@ String FileSegment::getInfoForLogImpl(std::lock_guard & segment_lock
return info.str();
}
+void FileSegment::wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard & segment_lock) const
+{
+ e.addMessage(fmt::format("{}, current cache state: {}", message, getInfoForLogImpl(segment_lock)));
+}
+
String FileSegment::stateToString(FileSegment::State state)
{
switch (state)
@@ -504,6 +608,23 @@ void FileSegment::assertCorrectnessImpl(std::lock_guard & /* segment
assert(download_state != FileSegment::State::DOWNLOADED || std::filesystem::file_size(cache->getPathInLocalCache(key(), offset())) > 0);
}
+FileSegmentPtr FileSegment::getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & /* cache_lock */)
+{
+ auto snapshot = std::make_shared(
+ file_segment->offset(),
+ file_segment->range().size(),
+ file_segment->key(),
+ nullptr,
+ State::EMPTY);
+
+ snapshot->hits_count = file_segment->getHitsCount();
+ snapshot->ref_count = file_segment.use_count();
+ snapshot->downloaded_size = file_segment->getDownloadedSize();
+ snapshot->download_state = file_segment->state();
+
+ return snapshot;
+}
+
FileSegmentsHolder::~FileSegmentsHolder()
{
/// In CacheableReadBufferFromRemoteFS file segment's downloader removes file segments from
diff --git a/src/Common/FileSegment.h b/src/Common/FileSegment.h
index b7501640913..615fd9a56de 100644
--- a/src/Common/FileSegment.h
+++ b/src/Common/FileSegment.h
@@ -97,6 +97,15 @@ public:
void write(const char * from, size_t size, size_t offset_);
+ /**
+ * writeInMemory and finalizeWrite are used together to write a single file with delay.
+ * Both can be called only once, one after another. Used for writing cache via threadpool
+ * on write operations. TODO: this solution is temporary, until adding a separate cache layer.
+ */
+ void writeInMemory(const char * from, size_t size);
+
+ size_t finalizeWrite();
+
RemoteFileReaderPtr getRemoteFileReader();
void setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_);
@@ -117,14 +126,24 @@ public:
size_t getDownloadOffset() const;
+ size_t getDownloadedSize() const;
+
void completeBatchAndResetDownloader();
void complete(State state);
String getInfoForLog() const;
+ size_t getHitsCount() const { return hits_count; }
+
+ size_t getRefCount() const { return ref_count; }
+
+ void incrementHitsCount() { ++hits_count; }
+
void assertCorrectness() const;
+ static FileSegmentPtr getSnapshot(const FileSegmentPtr & file_segment, std::lock_guard & cache_lock);
+
private:
size_t availableSize() const { return reserved_size - downloaded_size; }
@@ -133,6 +152,9 @@ private:
void assertCorrectnessImpl(std::lock_guard & segment_lock) const;
void setDownloaded(std::lock_guard & segment_lock);
+ void setDownloadFailed(std::lock_guard & segment_lock);
+
+ void wrapWithCacheInfo(Exception & e, const String & message, std::lock_guard & segment_lock) const;
bool lastFileSegmentHolder() const;
@@ -144,9 +166,9 @@ private:
void completeImpl(
std::lock_guard & cache_lock,
- std::lock_guard & segment_lock, bool allow_non_strict_checking = false);
+ std::lock_guard & segment_lock);
- static String getCallerIdImpl(bool allow_non_strict_checking = false);
+ static String getCallerIdImpl();
void resetDownloaderImpl(std::lock_guard & segment_lock);
@@ -180,6 +202,8 @@ private:
bool detached = false;
std::atomic is_downloaded{false};
+ std::atomic hits_count = 0; /// cache hits.
+ std::atomic ref_count = 0; /// Used for getting snapshot state
};
struct FileSegmentsHolder : private boost::noncopyable
diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp
index e5991421633..88e6e8327b8 100644
--- a/src/Common/SystemLogBase.cpp
+++ b/src/Common/SystemLogBase.cpp
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#include
#include
diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h
index cfb4821691c..da3d6c24562 100644
--- a/src/Common/SystemLogBase.h
+++ b/src/Common/SystemLogBase.h
@@ -23,6 +23,7 @@
M(QueryViewsLogElement) \
M(SessionLogElement) \
M(TraceLogElement) \
+ M(TransactionsInfoLogElement) \
M(ZooKeeperLogElement) \
M(ProcessorProfileLogElement) \
M(TextLogElement)
diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h
index f3920474111..3d7ec08cdaf 100644
--- a/src/Common/ThreadStatus.h
+++ b/src/Common/ThreadStatus.h
@@ -216,6 +216,11 @@ public:
return query_context.lock();
}
+ auto getGlobalContext() const
+ {
+ return global_context.lock();
+ }
+
void disableProfiling()
{
assert(!query_profiler_real && !query_profiler_cpu);
diff --git a/src/Common/TransactionID.cpp b/src/Common/TransactionID.cpp
new file mode 100644
index 00000000000..8a9894fbe53
--- /dev/null
+++ b/src/Common/TransactionID.cpp
@@ -0,0 +1,43 @@
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+TIDHash TransactionID::getHash() const
+{
+ SipHash hash;
+ hash.update(start_csn);
+ hash.update(local_tid);
+ hash.update(host_id);
+ return hash.get64();
+}
+
+
+void TransactionID::write(const TransactionID & tid, WriteBuffer & buf)
+{
+ writeChar('(', buf);
+ writeText(tid.start_csn, buf);
+ writeCString(", ", buf);
+ writeText(tid.local_tid, buf);
+ writeCString(", ", buf);
+ writeText(tid.host_id, buf);
+ writeChar(')', buf);
+}
+
+TransactionID TransactionID::read(ReadBuffer & buf)
+{
+ TransactionID tid = Tx::EmptyTID;
+ assertChar('(', buf);
+ readText(tid.start_csn, buf);
+ assertString(", ", buf);
+ readText(tid.local_tid, buf);
+ assertString(", ", buf);
+ readText(tid.host_id, buf);
+ assertChar(')', buf);
+ return tid;
+}
+
+}
diff --git a/src/Common/TransactionID.h b/src/Common/TransactionID.h
new file mode 100644
index 00000000000..3ab86f7589c
--- /dev/null
+++ b/src/Common/TransactionID.h
@@ -0,0 +1,115 @@
+#pragma once
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+class IDataType;
+using DataTypePtr = std::shared_ptr;
+class MergeTreeTransaction;
+
+/// This macro is useful for places where a pointer to current transaction should be passed,
+/// but transactions are not supported yet (e.g. when calling MergeTreeData's methods from StorageReplicatedMergeTree)
+/// or transaction object is not needed and not passed intentionally.
+#ifndef NO_TRANSACTION_PTR
+#define NO_TRANSACTION_PTR std::shared_ptr(nullptr)
+#define NO_TRANSACTION_RAW static_cast(nullptr)
+#endif
+
+/// Commit Sequence Number
+using CSN = UInt64;
+/// Local part of TransactionID
+using LocalTID = UInt64;
+/// Hash of TransactionID that fits into 64-bit atomic
+using TIDHash = UInt64;
+
+namespace Tx
+{
+ /// For transactions that are probably not committed (yet)
+ const CSN UnknownCSN = 0;
+ /// For changes were made without creating a transaction
+ const CSN PrehistoricCSN = 1;
+ /// Special reserved values
+ const CSN CommittingCSN = 2;
+ const CSN EverythingVisibleCSN = 3;
+ const CSN MaxReservedCSN = 32;
+
+ /// So large that changes with this CSN will never become visible
+ const CSN RolledBackCSN = std::numeric_limits::max();
+
+ const LocalTID PrehistoricLocalTID = 1;
+ const LocalTID DummyLocalTID = 2;
+ const LocalTID MaxReservedLocalTID = 32;
+}
+
+struct TransactionID
+{
+ /// Global sequential number, the newest commit timestamp that we saw when this transaction began
+ CSN start_csn = 0;
+ /// Local sequential number that is unique for each transaction started by this host within a specific start_csn
+ LocalTID local_tid = 0;
+ /// UUID of host that has started this transaction
+ UUID host_id = UUIDHelpers::Nil;
+
+ /// NOTE Maybe we could just generate UUIDv4 for each transaction, but it would be harder to debug.
+ /// Partial order is defined for this TransactionID structure:
+ /// (tid1.start_csn <= tid2.start_csn) <==> (tid1 <= tid2)
+ /// (tid1.start_csn == tid2.start_csn && tid1.host_id == tid2.host_id && tid1.local_tid < tid2.local_tid) ==> (tid1 < tid2)
+ /// If two transaction have the same start_csn, but were started by different hosts, then order is undefined.
+
+ bool operator == (const TransactionID & rhs) const
+ {
+ return start_csn == rhs.start_csn && local_tid == rhs.local_tid && host_id == rhs.host_id;
+ }
+
+ bool operator != (const TransactionID & rhs) const
+ {
+ return !(*this == rhs);
+ }
+
+ TIDHash getHash() const;
+
+ bool isEmpty() const
+ {
+ assert((local_tid == 0) == (start_csn == 0 && host_id == UUIDHelpers::Nil));
+ return local_tid == 0;
+ }
+
+ bool isPrehistoric() const
+ {
+ assert((local_tid == Tx::PrehistoricLocalTID) == (start_csn == Tx::PrehistoricCSN));
+ return local_tid == Tx::PrehistoricLocalTID;
+ }
+
+
+ static void write(const TransactionID & tid, WriteBuffer & buf);
+ static TransactionID read(ReadBuffer & buf);
+};
+
+namespace Tx
+{
+ const TransactionID EmptyTID = {0, 0, UUIDHelpers::Nil};
+ const TransactionID PrehistoricTID = {PrehistoricCSN, PrehistoricLocalTID, UUIDHelpers::Nil};
+ const TransactionID DummyTID = {PrehistoricCSN, DummyLocalTID, UUIDHelpers::Nil};
+}
+
+}
+
+template<>
+struct fmt::formatter
+{
+ template
+ constexpr auto parse(ParseContext & context)
+ {
+ return context.begin();
+ }
+
+ template
+ auto format(const DB::TransactionID & tid, FormatContext & context)
+ {
+ return fmt::format_to(context.out(), "({}, {}, {})", tid.start_csn, tid.local_tid, tid.host_id);
+ }
+};
diff --git a/src/Common/ZooKeeper/ZooKeeper.cpp b/src/Common/ZooKeeper/ZooKeeper.cpp
index aae3b6d4191..0f4b141d058 100644
--- a/src/Common/ZooKeeper/ZooKeeper.cpp
+++ b/src/Common/ZooKeeper/ZooKeeper.cpp
@@ -1270,4 +1270,14 @@ String extractZooKeeperPath(const String & path, bool check_starts_with_slash, P
return normalizeZooKeeperPath(path, check_starts_with_slash, log);
}
+String getSequentialNodeName(const String & prefix, UInt64 number)
+{
+ /// NOTE Sequential counter in ZooKeeper is Int32.
+ assert(number < std::numeric_limits::max());
+ constexpr size_t seq_node_digits = 10;
+ String num_str = std::to_string(number);
+ String name = prefix + String(seq_node_digits - num_str.size(), '0') + num_str;
+ return name;
+}
+
}
diff --git a/src/Common/ZooKeeper/ZooKeeper.h b/src/Common/ZooKeeper/ZooKeeper.h
index 0f7eccd2547..4d5bd039a55 100644
--- a/src/Common/ZooKeeper/ZooKeeper.h
+++ b/src/Common/ZooKeeper/ZooKeeper.h
@@ -417,4 +417,6 @@ String extractZooKeeperName(const String & path);
String extractZooKeeperPath(const String & path, bool check_starts_with_slash, Poco::Logger * log = nullptr);
+String getSequentialNodeName(const String & prefix, UInt64 number);
+
}
diff --git a/src/Common/tests/gtest_lru_file_cache.cpp b/src/Common/tests/gtest_lru_file_cache.cpp
index c08b12857a1..dfcf51ddf2f 100644
--- a/src/Common/tests/gtest_lru_file_cache.cpp
+++ b/src/Common/tests/gtest_lru_file_cache.cpp
@@ -4,6 +4,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -102,7 +103,10 @@ TEST(LRUFileCache, get)
query_context->setCurrentQueryId("query_id");
DB::CurrentThread::QueryScope query_scope_holder(query_context);
- auto cache = DB::LRUFileCache(cache_base_path, 30, 5);
+ DB::FileCacheSettings settings;
+ settings.max_size = 30;
+ settings.max_elements = 5;
+ auto cache = DB::LRUFileCache(cache_base_path, settings);
cache.initialize();
auto key = cache.hash("key1");
@@ -472,7 +476,7 @@ TEST(LRUFileCache, get)
{
/// Test LRUCache::restore().
- auto cache2 = DB::LRUFileCache(cache_base_path, 30, 5);
+ auto cache2 = DB::LRUFileCache(cache_base_path, settings);
cache2.initialize();
ASSERT_EQ(cache2.getStat().downloaded_size, 5);
@@ -491,7 +495,9 @@ TEST(LRUFileCache, get)
{
/// Test max file segment size
- auto cache2 = DB::LRUFileCache(caches_dir / "cache2", 30, 5, /* max_file_segment_size */10);
+ auto settings2 = settings;
+ settings2.max_file_segment_size = 10;
+ auto cache2 = DB::LRUFileCache(caches_dir / "cache2", settings2);
cache2.initialize();
auto holder1 = cache2.getOrSet(key, 0, 25); /// Get [0, 24]
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 2cbfe97cde5..aa78456702c 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -465,6 +465,7 @@ class IColumn;
M(Bool, use_compact_format_in_distributed_parts_names, true, "Changes format of directories names for distributed table insert parts.", 0) \
M(Bool, validate_polygons, true, "Throw exception if polygon is invalid in function pointInPolygon (e.g. self-tangent, self-intersecting). If the setting is false, the function will accept invalid polygons but may silently return wrong result.", 0) \
M(UInt64, max_parser_depth, DBMS_DEFAULT_MAX_PARSER_DEPTH, "Maximum parser depth (recursion depth of recursive descend parser).", 0) \
+ M(Bool, allow_settings_after_format_in_insert, false, "Allow SETTINGS after FORMAT, but note, that this is not always safe (note: this is a compatibility setting).", 0) \
M(Seconds, temporary_live_view_timeout, DEFAULT_TEMPORARY_LIVE_VIEW_TIMEOUT_SEC, "Timeout after which temporary live view is deleted.", 0) \
M(Seconds, periodic_live_view_refresh, DEFAULT_PERIODIC_LIVE_VIEW_REFRESH_SEC, "Interval after which periodically refreshed live view is forced to refresh.", 0) \
M(Bool, transform_null_in, false, "If enabled, NULL values will be matched with 'IN' operator as if they are considered equal.", 0) \
@@ -560,8 +561,10 @@ class IColumn;
\
M(UInt64, remote_fs_read_max_backoff_ms, 10000, "Max wait time when trying to read data for remote disk", 0) \
M(UInt64, remote_fs_read_backoff_max_tries, 5, "Max attempts to read with backoff", 0) \
- M(Bool, remote_fs_enable_cache, true, "Use cache for remote filesystem. This setting does not turn on/off cache for disks (must me done via disk config), but allows to bypass cache for some queries if intended", 0) \
- M(UInt64, remote_fs_cache_max_wait_sec, 5, "Allow to wait at most this number of seconds for download of current remote_fs_buffer_size bytes, and skip cache if exceeded", 0) \
+ M(Bool, enable_filesystem_cache, true, "Use cache for remote filesystem. This setting does not turn on/off cache for disks (must me done via disk config), but allows to bypass cache for some queries if intended", 0) \
+ M(UInt64, filesystem_cache_max_wait_sec, 5, "Allow to wait at most this number of seconds for download of current remote_fs_buffer_size bytes, and skip cache if exceeded", 0) \
+ M(Bool, enable_filesystem_cache_on_write_operations, false, "Write into cache on write operations. To actually work this setting requires be added to disk config too", 0) \
+ M(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, "", 0) \
\
M(UInt64, http_max_tries, 10, "Max attempts to read via http.", 0) \
M(UInt64, http_retry_initial_backoff_ms, 100, "Min milliseconds for backoff, when retrying read via http", 0) \
@@ -578,6 +581,7 @@ class IColumn;
M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \
M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
M(String, insert_deduplication_token, "", "If not empty, used for duplicate detection instead of data digest", 0) \
+ M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
// End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
@@ -638,6 +642,12 @@ class IColumn;
M(UInt64, input_format_msgpack_number_of_columns, 0, "The number of columns in inserted MsgPack data. Used for automatic schema inference from data.", 0) \
M(MsgPackUUIDRepresentation, output_format_msgpack_uuid_representation, FormatSettings::MsgPackUUIDRepresentation::EXT, "The way how to output UUID in MsgPack format.", 0) \
M(UInt64, input_format_max_rows_to_read_for_schema_inference, 100, "The maximum rows of data to read for automatic schema inference", 0) \
+ M(Bool, input_format_csv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in CSV format", 0) \
+ M(Bool, input_format_tsv_use_best_effort_in_schema_inference, true, "Use some tweaks and heuristics to infer schema in TSV format", 0) \
+ M(Bool, input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference, false, "Allow to skip columns with unsupported types while schema inference for format Parquet", 0) \
+ M(Bool, input_format_orc_skip_columns_with_unsupported_types_in_schema_inference, false, "Allow to skip columns with unsupported types while schema inference for format ORC", 0) \
+ M(Bool, input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference, false, "Allow to skip columns with unsupported types while schema inference for format Arrow", 0) \
+ M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \
M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
\
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h
index 1168013488e..7fe54c12665 100644
--- a/src/Core/SettingsEnums.h
+++ b/src/Core/SettingsEnums.h
@@ -165,7 +165,7 @@ DECLARE_SETTING_ENUM(DistributedDDLOutputMode)
enum class HandleKafkaErrorMode
{
- DEFAULT = 0, // Ignore errors whit threshold.
+ DEFAULT = 0, // Ignore errors with threshold.
STREAM, // Put errors to stream in the virtual column named ``_error.
/*FIXED_SYSTEM_TABLE, Put errors to in a fixed system table likey system.kafka_errors. This is not implemented now. */
/*CUSTOM_SYSTEM_TABLE, Put errors to in a custom system table. This is not implemented now. */
diff --git a/src/DataTypes/DataTypeMap.cpp b/src/DataTypes/DataTypeMap.cpp
index 41de17982aa..42ec739c33b 100644
--- a/src/DataTypes/DataTypeMap.cpp
+++ b/src/DataTypes/DataTypeMap.cpp
@@ -45,22 +45,7 @@ DataTypeMap::DataTypeMap(const DataTypePtr & key_type_, const DataTypePtr & valu
void DataTypeMap::assertKeyType() const
{
- bool type_error = false;
- if (key_type->getTypeId() == TypeIndex::LowCardinality)
- {
- const auto & low_cardinality_data_type = assert_cast(*key_type);
- if (!isStringOrFixedString(*(low_cardinality_data_type.getDictionaryType())))
- type_error = true;
- }
- else if (!key_type->isValueRepresentedByInteger()
- && !isStringOrFixedString(*key_type)
- && !WhichDataType(key_type).isNothing()
- && !WhichDataType(key_type).isUUID())
- {
- type_error = true;
- }
-
- if (type_error)
+ if (!checkKeyType(key_type))
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Type of Map key must be a type, that can be represented by integer or String or FixedString (possibly LowCardinality) or UUID,"
" but {} given", key_type->getName());
@@ -102,6 +87,25 @@ bool DataTypeMap::equals(const IDataType & rhs) const
return nested->equals(*rhs_map.nested);
}
+bool DataTypeMap::checkKeyType(DataTypePtr key_type)
+{
+ if (key_type->getTypeId() == TypeIndex::LowCardinality)
+ {
+ const auto & low_cardinality_data_type = assert_cast(*key_type);
+ if (!isStringOrFixedString(*(low_cardinality_data_type.getDictionaryType())))
+ return false;
+ }
+ else if (!key_type->isValueRepresentedByInteger()
+ && !isStringOrFixedString(*key_type)
+ && !WhichDataType(key_type).isNothing()
+ && !WhichDataType(key_type).isUUID())
+ {
+ return false;
+ }
+
+ return true;
+}
+
static DataTypePtr create(const ASTPtr & arguments)
{
if (!arguments || arguments->children.size() != 2)
diff --git a/src/DataTypes/DataTypeMap.h b/src/DataTypes/DataTypeMap.h
index 65bdd93ca4d..479008031fe 100644
--- a/src/DataTypes/DataTypeMap.h
+++ b/src/DataTypes/DataTypeMap.h
@@ -48,6 +48,8 @@ public:
SerializationPtr doGetDefaultSerialization() const override;
+ static bool checkKeyType(DataTypePtr key_type);
+
private:
void assertKeyType() const;
};
diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp
index d94eceb7dec..2a07ba8375d 100644
--- a/src/Databases/DatabaseReplicated.cpp
+++ b/src/Databases/DatabaseReplicated.cpp
@@ -461,6 +461,10 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
BlockIO DatabaseReplicated::tryEnqueueReplicatedDDL(const ASTPtr & query, ContextPtr query_context)
{
+
+ if (query_context->getCurrentTransaction() && query_context->getSettingsRef().throw_on_unsupported_query_inside_transaction)
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Distributed DDL queries inside transactions are not supported");
+
if (is_readonly)
throw Exception(ErrorCodes::NO_ZOOKEEPER, "Database is in readonly mode, because it cannot connect to ZooKeeper");
diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp
index 84c3f857a81..5765f1c6598 100644
--- a/src/Databases/DatabaseReplicatedWorker.cpp
+++ b/src/Databases/DatabaseReplicatedWorker.cpp
@@ -319,7 +319,6 @@ bool DatabaseReplicatedDDLWorker::canRemoveQueueEntry(const String & entry_name,
void DatabaseReplicatedDDLWorker::initializeLogPointer(const String & processed_entry_name)
{
updateMaxDDLEntryID(processed_entry_name);
- assert(max_id.load() == parse(getAndSetZooKeeper()->get(fs::path(database->replica_path) / "log_ptr")));
}
UInt32 DatabaseReplicatedDDLWorker::getLogPointer() const
diff --git a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp
index fb07d8c356b..78b9b9e3446 100644
--- a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp
+++ b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.cpp
@@ -71,8 +71,8 @@ std::unique_ptr DiskAzureBlobStorage::readFile(
LOG_TEST(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
auto reader_impl = std::make_unique(
- path, blob_container_client, metadata, settings->max_single_read_retries,
- settings->max_single_download_retries, read_settings);
+ blob_container_client, metadata.remote_fs_root_path, metadata.remote_fs_objects,
+ settings->max_single_read_retries, settings->max_single_download_retries, read_settings);
if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool)
{
@@ -90,7 +90,8 @@ std::unique_ptr DiskAzureBlobStorage::readFile(
std::unique_ptr DiskAzureBlobStorage::writeFile(
const String & path,
size_t buf_size,
- WriteMode mode)
+ WriteMode mode,
+ const WriteSettings &)
{
auto blob_path = path + "_" + getRandomASCIIString(8); /// NOTE: path contains the tmp_* prefix in the blob name
@@ -108,7 +109,7 @@ std::unique_ptr DiskAzureBlobStorage::writeFile(
readOrCreateUpdateAndStoreMetadata(path, mode, false, [blob_path, count] (Metadata & metadata) { metadata.addObject(blob_path, count); return true; });
};
- return std::make_unique(std::move(buffer), std::move(create_metadata_callback), path);
+ return std::make_unique(std::move(buffer), std::move(create_metadata_callback), blob_path);
}
diff --git a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h
index 63c3c735812..efc245e7eb3 100644
--- a/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h
+++ b/src/Disks/AzureBlobStorage/DiskAzureBlobStorage.h
@@ -56,7 +56,8 @@ public:
std::unique_ptr writeFile(
const String & path,
size_t buf_size,
- WriteMode mode) override;
+ WriteMode mode,
+ const WriteSettings & settings) override;
DiskType getType() const override;
diff --git a/src/Disks/DiskCacheWrapper.cpp b/src/Disks/DiskCacheWrapper.cpp
index 3519b1212a4..cc2c330975a 100644
--- a/src/Disks/DiskCacheWrapper.cpp
+++ b/src/Disks/DiskCacheWrapper.cpp
@@ -150,7 +150,7 @@ DiskCacheWrapper::readFile(
/// Note: enabling `threadpool` read requires to call setReadUntilEnd().
current_read_settings.remote_fs_method = RemoteFSReadMethod::read;
/// Disable data cache.
- current_read_settings.remote_fs_enable_cache = false;
+ current_read_settings.enable_filesystem_cache = false;
if (metadata->status == DOWNLOADING)
{
@@ -167,7 +167,11 @@ DiskCacheWrapper::readFile(
auto tmp_path = path + ".tmp";
{
auto src_buffer = DiskDecorator::readFile(path, current_read_settings, read_hint, file_size);
- auto dst_buffer = cache_disk->writeFile(tmp_path, settings.local_fs_buffer_size, WriteMode::Rewrite);
+
+ WriteSettings write_settings;
+ write_settings.enable_filesystem_cache_on_write_operations = false;
+
+ auto dst_buffer = cache_disk->writeFile(tmp_path, settings.local_fs_buffer_size, WriteMode::Rewrite, write_settings);
copyData(*src_buffer, *dst_buffer);
}
cache_disk->moveFile(tmp_path, path);
@@ -196,10 +200,15 @@ DiskCacheWrapper::readFile(
}
std::unique_ptr
-DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode)
+DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings)
{
if (!cache_file_predicate(path))
- return DiskDecorator::writeFile(path, buf_size, mode);
+ return DiskDecorator::writeFile(path, buf_size, mode, settings);
+
+ WriteSettings current_settings = settings;
+ /// There are two different cache implementations. Disable second one if the first is enabled.
+ /// The first will soon be removed, this disabling is temporary.
+ current_settings.enable_filesystem_cache_on_write_operations = false;
LOG_TEST(log, "Write file {} to cache", backQuote(path));
@@ -208,15 +217,15 @@ DiskCacheWrapper::writeFile(const String & path, size_t buf_size, WriteMode mode
cache_disk->createDirectories(dir_path);
return std::make_unique(
- cache_disk->writeFile(path, buf_size, mode),
+ cache_disk->writeFile(path, buf_size, mode, current_settings),
[this, path]()
{
/// Copy file from cache to actual disk when cached buffer is finalized.
return cache_disk->readFile(path, ReadSettings(), /* read_hint= */ {}, /* file_size= */ {});
},
- [this, path, buf_size, mode]()
+ [this, path, buf_size, mode, current_settings]()
{
- return DiskDecorator::writeFile(path, buf_size, mode);
+ return DiskDecorator::writeFile(path, buf_size, mode, current_settings);
});
}
diff --git a/src/Disks/DiskCacheWrapper.h b/src/Disks/DiskCacheWrapper.h
index dc66333758f..e413a3742f3 100644
--- a/src/Disks/DiskCacheWrapper.h
+++ b/src/Disks/DiskCacheWrapper.h
@@ -40,7 +40,7 @@ public:
std::optional read_hint,
std::optional file_size) const override;
- std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override;
+ std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &) override;
void removeFile(const String & path) override;
void removeFileIfExists(const String & path) override;
diff --git a/src/Disks/DiskDecorator.cpp b/src/Disks/DiskDecorator.cpp
index 37911f16913..14f507af55d 100644
--- a/src/Disks/DiskDecorator.cpp
+++ b/src/Disks/DiskDecorator.cpp
@@ -121,9 +121,9 @@ DiskDecorator::readFile(
}
std::unique_ptr
-DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode)
+DiskDecorator::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings)
{
- return delegate->writeFile(path, buf_size, mode);
+ return delegate->writeFile(path, buf_size, mode, settings);
}
void DiskDecorator::removeFile(const String & path)
diff --git a/src/Disks/DiskDecorator.h b/src/Disks/DiskDecorator.h
index bace54ff22a..e5c9c7699bf 100644
--- a/src/Disks/DiskDecorator.h
+++ b/src/Disks/DiskDecorator.h
@@ -44,7 +44,8 @@ public:
std::unique_ptr writeFile(
const String & path,
size_t buf_size,
- WriteMode mode) override;
+ WriteMode mode,
+ const WriteSettings & settings) override;
void removeFile(const String & path) override;
void removeFileIfExists(const String & path) override;
@@ -71,6 +72,9 @@ public:
void shutdown() override;
void startup() override;
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override;
+ String getCacheBasePath() const override { return delegate->getCacheBasePath(); }
+ std::vector getRemotePaths(const String & path) const override { return delegate->getRemotePaths(path); }
+ void getRemotePathsRecursive(const String & path, std::vector & paths_map) override { return delegate->getRemotePathsRecursive(path, paths_map); }
DiskPtr getMetadataDiskIfExistsOrSelf() override { return delegate->getMetadataDiskIfExistsOrSelf(); }
diff --git a/src/Disks/DiskEncrypted.cpp b/src/Disks/DiskEncrypted.cpp
index 714264b7720..3cee205fafc 100644
--- a/src/Disks/DiskEncrypted.cpp
+++ b/src/Disks/DiskEncrypted.cpp
@@ -269,7 +269,7 @@ std::unique_ptr DiskEncrypted::readFile(
return std::make_unique(settings.local_fs_buffer_size, std::move(buffer), key, header);
}
-std::unique_ptr DiskEncrypted::writeFile(const String & path, size_t buf_size, WriteMode mode)
+std::unique_ptr DiskEncrypted::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &)
{
auto wrapped_path = wrappedPath(path);
FileEncryption::Header header;
diff --git a/src/Disks/DiskEncrypted.h b/src/Disks/DiskEncrypted.h
index d99fe17457d..07a2ad81010 100644
--- a/src/Disks/DiskEncrypted.h
+++ b/src/Disks/DiskEncrypted.h
@@ -126,7 +126,8 @@ public:
std::unique_ptr writeFile(
const String & path,
size_t buf_size,
- WriteMode mode) override;
+ WriteMode mode,
+ const WriteSettings & settings) override;
void removeFile(const String & path) override
{
diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp
index a91db508295..8aad42ab475 100644
--- a/src/Disks/DiskLocal.cpp
+++ b/src/Disks/DiskLocal.cpp
@@ -345,7 +345,7 @@ std::unique_ptr DiskLocal::readFile(const String & path,
}
std::unique_ptr
-DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode)
+DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &)
{
int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1;
return std::make_unique(fs::path(disk_path) / path, buf_size, flags);
@@ -624,7 +624,7 @@ bool DiskLocal::setup()
pcg32_fast rng(randomSeed());
UInt32 magic_number = rng();
{
- auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
+ auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {});
writeIntBinary(magic_number, *buf);
}
disk_checker_magic_number = magic_number;
diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h
index 76d5a88a626..59dcf5e5c13 100644
--- a/src/Disks/DiskLocal.h
+++ b/src/Disks/DiskLocal.h
@@ -79,7 +79,8 @@ public:
std::unique_ptr writeFile(
const String & path,
size_t buf_size,
- WriteMode mode) override;
+ WriteMode mode,
+ const WriteSettings & settings) override;
void removeFile(const String & path) override;
void removeFileIfExists(const String & path) override;
diff --git a/src/Disks/DiskMemory.cpp b/src/Disks/DiskMemory.cpp
index abaea0846a5..4f0e881e079 100644
--- a/src/Disks/DiskMemory.cpp
+++ b/src/Disks/DiskMemory.cpp
@@ -326,7 +326,7 @@ std::unique_ptr DiskMemory::readFile(const String & path
return std::make_unique(path, iter->second.data);
}
-std::unique_ptr DiskMemory::writeFile(const String & path, size_t buf_size, WriteMode mode)
+std::unique_ptr DiskMemory::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &)
{
std::lock_guard lock(mutex);
diff --git a/src/Disks/DiskMemory.h b/src/Disks/DiskMemory.h
index fe108f53c68..726be8bc3b5 100644
--- a/src/Disks/DiskMemory.h
+++ b/src/Disks/DiskMemory.h
@@ -71,7 +71,8 @@ public:
std::unique_ptr writeFile(
const String & path,
size_t buf_size,
- WriteMode mode) override;
+ WriteMode mode,
+ const WriteSettings & settings) override;
void removeFile(const String & path) override;
void removeFileIfExists(const String & path) override;
diff --git a/src/Disks/DiskRestartProxy.cpp b/src/Disks/DiskRestartProxy.cpp
index 43011a4cf72..8045a0e8c72 100644
--- a/src/Disks/DiskRestartProxy.cpp
+++ b/src/Disks/DiskRestartProxy.cpp
@@ -214,10 +214,10 @@ std::unique_ptr DiskRestartProxy::readFile(
return std::make_unique(*this, std::move(impl));
}
-std::unique_ptr DiskRestartProxy::writeFile(const String & path, size_t buf_size, WriteMode mode)
+std::unique_ptr DiskRestartProxy::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings)
{
ReadLock lock (mutex);
- auto impl = DiskDecorator::writeFile(path, buf_size, mode);
+ auto impl = DiskDecorator::writeFile(path, buf_size, mode, settings);
return std::make_unique(*this, std::move(impl));
}
@@ -305,6 +305,24 @@ bool DiskRestartProxy::checkUniqueId(const String & id) const
return DiskDecorator::checkUniqueId(id);
}
+String DiskRestartProxy::getCacheBasePath() const
+{
+ ReadLock lock (mutex);
+ return DiskDecorator::getCacheBasePath();
+}
+
+std::vector DiskRestartProxy::getRemotePaths(const String & path) const
+{
+ ReadLock lock (mutex);
+ return DiskDecorator::getRemotePaths(path);
+}
+
+void DiskRestartProxy::getRemotePathsRecursive(const String & path, std::vector & paths_map)
+{
+ ReadLock lock (mutex);
+ return DiskDecorator::getRemotePathsRecursive(path, paths_map);
+}
+
void DiskRestartProxy::restart()
{
/// Speed up processing unhealthy requests.
diff --git a/src/Disks/DiskRestartProxy.h b/src/Disks/DiskRestartProxy.h
index 30f553f4fe0..baa57386e68 100644
--- a/src/Disks/DiskRestartProxy.h
+++ b/src/Disks/DiskRestartProxy.h
@@ -48,7 +48,7 @@ public:
const ReadSettings & settings,
std::optional read_hint,
std::optional file_size) const override;
- std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override;
+ std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) override;
void removeFile(const String & path) override;
void removeFileIfExists(const String & path) override;
void removeDirectory(const String & path) override;
@@ -63,6 +63,9 @@ public:
void truncateFile(const String & path, size_t size) override;
String getUniqueId(const String & path) const override;
bool checkUniqueId(const String & id) const override;
+ String getCacheBasePath() const override;
+ std::vector getRemotePaths(const String & path) const override;
+ void getRemotePathsRecursive(const String & path, std::vector & paths_map) override;
void restart();
diff --git a/src/Disks/DiskWebServer.cpp b/src/Disks/DiskWebServer.cpp
index f3039d9af2e..2f8929982e3 100644
--- a/src/Disks/DiskWebServer.cpp
+++ b/src/Disks/DiskWebServer.cpp
@@ -166,9 +166,9 @@ std::unique_ptr DiskWebServer::readFile(const String & p
remote_path = remote_path.string().substr(url.size());
RemoteMetadata meta(path, remote_path);
- meta.remote_fs_objects.emplace_back(std::make_pair(remote_path, iter->second.size));
+ meta.remote_fs_objects.emplace_back(remote_path, iter->second.size);
- auto web_impl = std::make_unique(path, url, meta, getContext(), read_settings);
+ auto web_impl = std::make_unique(url, meta.remote_fs_root_path, meta.remote_fs_objects, getContext(), read_settings);
if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool)
{
diff --git a/src/Disks/DiskWebServer.h b/src/Disks/DiskWebServer.h
index e2da0b2a1e1..6341b582174 100644
--- a/src/Disks/DiskWebServer.h
+++ b/src/Disks/DiskWebServer.h
@@ -77,7 +77,6 @@ public:
UInt64 getTotalSpace() const final override { return std::numeric_limits::max(); }
UInt64 getAvailableSpace() const final override { return std::numeric_limits::max(); }
-
UInt64 getUnreservedSpace() const final override { return std::numeric_limits::max(); }
/// Read-only part
@@ -100,7 +99,7 @@ public:
/// Write and modification part
- std::unique_ptr writeFile(const String &, size_t, WriteMode) override
+ std::unique_ptr writeFile(const String &, size_t, WriteMode, const WriteSettings &) override
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Disk {} is read-only", getName());
}
@@ -165,6 +164,10 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Disk {} is read-only", getName());
}
+ std::vector getRemotePaths(const String &) const override { return {}; }
+
+ void getRemotePathsRecursive(const String &, std::vector &) override {}
+
/// Create part
void createFile(const String &) final override {}
diff --git a/src/Disks/HDFS/DiskHDFS.cpp b/src/Disks/HDFS/DiskHDFS.cpp
index 7f60b219a4b..a3817a85a36 100644
--- a/src/Disks/HDFS/DiskHDFS.cpp
+++ b/src/Disks/HDFS/DiskHDFS.cpp
@@ -82,17 +82,17 @@ std::unique_ptr DiskHDFS::readFile(const String & path,
"Read from file by path: {}. Existing HDFS objects: {}",
backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());
- auto hdfs_impl = std::make_unique(path, config, remote_fs_root_path, metadata, read_settings);
+ auto hdfs_impl = std::make_unique(config, remote_fs_root_path, remote_fs_root_path, metadata.remote_fs_objects, read_settings);
auto buf = std::make_unique(std::move(hdfs_impl));
return std::make_unique(std::move(buf), settings->min_bytes_for_seek);
}
-std::unique_ptr DiskHDFS::writeFile(const String & path, size_t buf_size, WriteMode mode)
+std::unique_ptr DiskHDFS::writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings &)
{
/// Path to store new HDFS object.
- auto file_name = getRandomName();
- auto hdfs_path = remote_fs_root_path + file_name;
+ std::string file_name = getRandomName();
+ std::string hdfs_path = fs::path(remote_fs_root_path) / file_name;
LOG_TRACE(log, "{} to file by path: {}. HDFS path: {}", mode == WriteMode::Rewrite ? "Write" : "Append",
backQuote(metadata_disk->getPath() + path), hdfs_path);
@@ -106,7 +106,7 @@ std::unique_ptr DiskHDFS::writeFile(const String & path
readOrCreateUpdateAndStoreMetadata(path, mode, false, [file_name, count] (Metadata & metadata) { metadata.addObject(file_name, count); return true; });
};
- return std::make_unique(std::move(hdfs_buffer), std::move(create_metadata_callback), path);
+ return std::make_unique(std::move(hdfs_buffer), std::move(create_metadata_callback), hdfs_path);
}
diff --git a/src/Disks/HDFS/DiskHDFS.h b/src/Disks/HDFS/DiskHDFS.h
index 23a108507b4..eba58101bc4 100644
--- a/src/Disks/HDFS/DiskHDFS.h
+++ b/src/Disks/HDFS/DiskHDFS.h
@@ -60,7 +60,7 @@ public:
std::optional read_hint,
std::optional file_size) const override;
- std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode) override;
+ std::unique_ptr writeFile(const String & path, size_t buf_size, WriteMode mode, const WriteSettings & settings) override;
void removeFromRemoteFS(RemoteFSPathKeeperPtr fs_paths_keeper) override;
diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h
index 4fa73b8eba8..81cdf47e1fb 100644
--- a/src/Disks/IDisk.h
+++ b/src/Disks/IDisk.h
@@ -9,6 +9,7 @@
#include
#include
#include
+#include
#include
#include
@@ -31,6 +32,11 @@ namespace Poco
namespace DB
{
+namespace ErrorCodes
+{
+ extern const int NOT_IMPLEMENTED;
+}
+
class IDiskDirectoryIterator;
using DiskDirectoryIteratorPtr = std::unique_ptr;
@@ -168,7 +174,8 @@ public:
virtual std::unique_ptr writeFile( /// NOLINT
const String & path,
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
- WriteMode mode = WriteMode::Rewrite) = 0;
+ WriteMode mode = WriteMode::Rewrite,
+ const WriteSettings & settings = {}) = 0;
/// Remove file. Throws exception if file doesn't exists or it's a directory.
virtual void removeFile(const String & path) = 0;
@@ -197,6 +204,24 @@ public:
/// Second bool param is a flag to remove (true) or keep (false) shared data on S3
virtual void removeSharedFileIfExists(const String & path, bool) { removeFileIfExists(path); }
+
+ virtual String getCacheBasePath() const { return ""; }
+
+ /// Returns a list of paths because for Log family engines there might be
+ /// multiple files in remote fs for single clickhouse file.
+ virtual std::vector getRemotePaths(const String &) const
+ {
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePaths() not implemented for disk: {}`", getType());
+ }
+
+ /// For one local path there might be multiple remote paths in case of Log family engines.
+ using LocalPathWithRemotePaths = std::pair>;
+
+ virtual void getRemotePathsRecursive(const String &, std::vector &)
+ {
+ throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `getRemotePathsRecursive() not implemented for disk: {}`", getType());
+ }
+
struct RemoveRequest
{
String path;
diff --git a/src/Disks/IDiskRemote.cpp b/src/Disks/IDiskRemote.cpp
index fa4189abc53..b475ae1ee94 100644
--- a/src/Disks/IDiskRemote.cpp
+++ b/src/Disks/IDiskRemote.cpp
@@ -122,7 +122,8 @@ void IDiskRemote::Metadata::load()
remote_fs_object_path = remote_fs_object_path.substr(remote_fs_root_path.size());
}
assertChar('\n', *buf);
- remote_fs_objects[i] = {remote_fs_object_path, remote_fs_object_size};
+ remote_fs_objects[i].relative_path = remote_fs_object_path;
+ remote_fs_objects[i].bytes_size = remote_fs_object_size;
}
readIntText(ref_count, *buf);
@@ -136,13 +137,15 @@ void IDiskRemote::Metadata::load()
}
catch (Exception & e)
{
+ tryLogCurrentException(__PRETTY_FUNCTION__);
+
if (e.code() == ErrorCodes::UNKNOWN_FORMAT)
throw;
if (e.code() == ErrorCodes::MEMORY_LIMIT_EXCEEDED)
throw;
- throw Exception("Failed to read metadata file", e, ErrorCodes::UNKNOWN_FORMAT);
+ throw Exception("Failed to read metadata file: " + metadata_file_path, e, ErrorCodes::UNKNOWN_FORMAT);
}
}
@@ -341,6 +344,30 @@ void IDiskRemote::removeMetadataRecursive(const String & path, RemoteFSPathKeepe
}
}
+std::vector IDiskRemote::getRemotePaths(const String & local_path) const
+{
+ auto metadata = readMetadata(local_path);
+
+ std::vector remote_paths;
+ for (const auto & [remote_path, _] : metadata.remote_fs_objects)
+ remote_paths.push_back(remote_path);
+
+ return remote_paths;
+}
+
+void IDiskRemote::getRemotePathsRecursive(const String & local_path, std::vector & paths_map)
+{
+ if (metadata_disk->isFile(local_path))
+ {
+ paths_map.emplace_back(local_path, getRemotePaths(local_path));
+ }
+ else
+ {
+ for (auto it = iterateDirectory(local_path); it->isValid(); it->next())
+ IDiskRemote::getRemotePathsRecursive(fs::path(local_path) / it->name(), paths_map);
+ }
+}
+
DiskPtr DiskRemoteReservation::getDisk(size_t i) const
{
if (i != 0)
@@ -348,7 +375,6 @@ DiskPtr DiskRemoteReservation::getDisk(size_t i) const
return disk;
}
-
void DiskRemoteReservation::update(UInt64 new_size)
{
std::lock_guard lock(disk->reservation_mutex);
@@ -402,6 +428,12 @@ IDiskRemote::IDiskRemote(
}
+String IDiskRemote::getCacheBasePath() const
+{
+ return cache ? cache->getBasePath() : "";
+}
+
+
bool IDiskRemote::exists(const String & path) const
{
return metadata_disk->exists(path);
@@ -607,7 +639,7 @@ String IDiskRemote::getUniqueId(const String & path) const
auto metadata = readMetadata(path);
String id;
if (!metadata.remote_fs_objects.empty())
- id = metadata.remote_fs_root_path + metadata.remote_fs_objects[0].first;
+ id = metadata.remote_fs_root_path + metadata.remote_fs_objects[0].relative_path;
return id;
}
diff --git a/src/Disks/IDiskRemote.h b/src/Disks/IDiskRemote.h
index 82e76b8f68d..aa78468c7bb 100644
--- a/src/Disks/IDiskRemote.h
+++ b/src/Disks/IDiskRemote.h
@@ -13,7 +13,6 @@
#include
#include
-
namespace CurrentMetrics
{
extern const Metric DiskSpaceReservedForMerge;
@@ -22,6 +21,24 @@ namespace CurrentMetrics
namespace DB
{
+/// Path to blob with it's size
+struct BlobPathWithSize
+{
+ std::string relative_path;
+ uint64_t bytes_size;
+
+ BlobPathWithSize() = default;
+ BlobPathWithSize(const BlobPathWithSize & other) = default;
+
+ BlobPathWithSize(const std::string & relative_path_, uint64_t bytes_size_)
+ : relative_path(relative_path_)
+ , bytes_size(bytes_size_)
+ {}
+};
+
+/// List of blobs with their sizes
+using BlobsPathToSize = std::vector;
+
/// Helper class to collect paths into chunks of maximum size.
/// For s3 it is Aws::vector, for hdfs it is std::vector.
class RemoteFSPathKeeper
@@ -66,6 +83,12 @@ public:
const String & getPath() const final override { return metadata_disk->getPath(); }
+ String getCacheBasePath() const final override;
+
+ std::vector getRemotePaths(const String & local_path) const final override;
+
+ void getRemotePathsRecursive(const String & local_path, std::vector & paths_map) override;
+
/// Methods for working with metadata. For some operations (like hardlink
/// creation) metadata can be updated concurrently from multiple threads
/// (file actually rewritten on disk). So additional RW lock is required for
@@ -163,6 +186,7 @@ protected:
const String remote_fs_root_path;
DiskPtr metadata_disk;
+
FileCachePtr cache;
private:
@@ -184,10 +208,8 @@ using RemoteDiskPtr = std::shared_ptr;
/// Minimum info, required to be passed to ReadIndirectBufferFromRemoteFS
struct RemoteMetadata
{
- using PathAndSize = std::pair;
-
/// Remote FS objects paths and their sizes.
- std::vector remote_fs_objects;
+ std::vector remote_fs_objects;
/// URI
const String & remote_fs_root_path;
diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
index de671e58687..16c1dd54f9f 100644
--- a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
+++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp
@@ -122,10 +122,25 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getReadBufferForFileSegment(
{
auto range = file_segment->range();
- size_t wait_download_max_tries = settings.remote_fs_cache_max_wait_sec;
+ size_t wait_download_max_tries = settings.filesystem_cache_max_wait_sec;
size_t wait_download_tries = 0;
auto download_state = file_segment->state();
+
+ if (settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache)
+ {
+ if (download_state == FileSegment::State::DOWNLOADED)
+ {
+ read_type = ReadType::CACHED;
+ return getCacheReadBuffer(range.left);
+ }
+ else
+ {
+ read_type = ReadType::REMOTE_FS_READ_BYPASS_CACHE;
+ return getRemoteFSReadBuffer(file_segment, read_type);
+ }
+ }
+
while (true)
{
switch (download_state)
@@ -375,6 +390,9 @@ bool CachedReadBufferFromRemoteFS::completeFileSegmentAndGetNext()
implementation_buffer = getImplementationBuffer(*current_file_segment_it);
+ if (read_type == ReadType::CACHED)
+ (*current_file_segment_it)->incrementHitsCount();
+
LOG_TEST(log, "New segment: {}", (*current_file_segment_it)->range().toString());
return true;
}
@@ -559,9 +577,6 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
{
last_caller_id = FileSegment::getCallerId();
- if (IFileCache::shouldBypassCache())
- throw Exception(ErrorCodes::LOGICAL_ERROR, "Using cache when not allowed");
-
if (!initialized)
initialize(file_offset_of_buffer_end, getTotalSizeToRead());
@@ -606,6 +621,9 @@ bool CachedReadBufferFromRemoteFS::nextImplStep()
else
{
implementation_buffer = getImplementationBuffer(*current_file_segment_it);
+
+ if (read_type == ReadType::CACHED)
+ (*current_file_segment_it)->incrementHitsCount();
}
assert(!internal_buffer.empty());
diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
index 0d50b24f7a5..16a57b83771 100644
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
+++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
@@ -38,12 +38,12 @@ SeekableReadBufferPtr ReadBufferFromS3Gather::createImplementationBuffer(const S
current_path = path;
auto cache = settings.remote_fs_cache;
- bool with_cache = cache && settings.remote_fs_enable_cache && !IFileCache::shouldBypassCache();
+ bool with_cache = cache && settings.enable_filesystem_cache;
auto remote_file_reader_creator = [=, this]()
{
return std::make_unique(
- client_ptr, bucket, fs::path(metadata.remote_fs_root_path) / path, max_single_read_retries,
+ client_ptr, bucket, fs::path(common_path_prefix) / path, max_single_read_retries,
settings, /* use_external_buffer */true, /* offset */ 0, read_until_position, /* restricted_seek */true);
};
@@ -83,11 +83,14 @@ SeekableReadBufferPtr ReadBufferFromHDFSGather::createImplementationBuffer(const
#endif
-ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(const RemoteMetadata & metadata_, const ReadSettings & settings_, const String & path_)
+ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
+ const ReadSettings & settings_)
: ReadBuffer(nullptr, 0)
- , metadata(metadata_)
+ , common_path_prefix(common_path_prefix_)
+ , blobs_to_read(blobs_to_read_)
, settings(settings_)
- , canonical_path(path_)
, log(&Poco::Logger::get("ReadBufferFromRemoteFSGather"))
{
}
@@ -119,9 +122,9 @@ void ReadBufferFromRemoteFSGather::initialize()
{
/// One clickhouse file can be split into multiple files in remote fs.
auto current_buf_offset = file_offset_of_buffer_end;
- for (size_t i = 0; i < metadata.remote_fs_objects.size(); ++i)
+ for (size_t i = 0; i < blobs_to_read.size(); ++i)
{
- const auto & [file_path, size] = metadata.remote_fs_objects[i];
+ const auto & [file_path, size] = blobs_to_read[i];
if (size > current_buf_offset)
{
@@ -138,7 +141,7 @@ void ReadBufferFromRemoteFSGather::initialize()
current_buf_offset -= size;
}
- current_buf_idx = metadata.remote_fs_objects.size();
+ current_buf_idx = blobs_to_read.size();
current_buf = nullptr;
}
@@ -168,12 +171,12 @@ bool ReadBufferFromRemoteFSGather::nextImpl()
bool ReadBufferFromRemoteFSGather::moveToNextBuffer()
{
/// If there is no available buffers - nothing to read.
- if (current_buf_idx + 1 >= metadata.remote_fs_objects.size())
+ if (current_buf_idx + 1 >= blobs_to_read.size())
return false;
++current_buf_idx;
- const auto & [path, size] = metadata.remote_fs_objects[current_buf_idx];
+ const auto & [path, size] = blobs_to_read[current_buf_idx];
current_buf = createImplementationBuffer(path, size);
return true;
@@ -202,7 +205,7 @@ bool ReadBufferFromRemoteFSGather::readImpl()
if (!result)
result = current_buf->next();
- if (metadata.remote_fs_objects.size() == 1)
+ if (blobs_to_read.size() == 1)
{
file_offset_of_buffer_end = current_buf->getFileOffsetOfBufferEnd();
}
@@ -255,8 +258,8 @@ String ReadBufferFromRemoteFSGather::getFileName() const
size_t ReadBufferFromRemoteFSGather::getFileSize() const
{
size_t size = 0;
- for (const auto & object : metadata.remote_fs_objects)
- size += object.second;
+ for (const auto & object : blobs_to_read)
+ size += object.bytes_size;
return size;
}
diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h
index 25bfe0b7e16..d12513cba1f 100644
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h
+++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h
@@ -26,9 +26,9 @@ friend class ReadIndirectBufferFromRemoteFS;
public:
ReadBufferFromRemoteFSGather(
- const RemoteMetadata & metadata_,
- const ReadSettings & settings_,
- const String & path_);
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
+ const ReadSettings & settings_);
String getFileName() const;
@@ -57,7 +57,9 @@ public:
protected:
virtual SeekableReadBufferPtr createImplementationBuffer(const String & path, size_t file_size) = 0;
- RemoteMetadata metadata;
+ std::string common_path_prefix;
+
+ BlobsPathToSize blobs_to_read;
ReadSettings settings;
@@ -89,8 +91,6 @@ private:
*/
size_t bytes_to_ignore = 0;
- String canonical_path;
-
Poco::Logger * log;
};
@@ -101,13 +101,13 @@ class ReadBufferFromS3Gather final : public ReadBufferFromRemoteFSGather
{
public:
ReadBufferFromS3Gather(
- const String & path_,
std::shared_ptr client_ptr_,
const String & bucket_,
- IDiskRemote::Metadata metadata_,
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
size_t max_single_read_retries_,
const ReadSettings & settings_)
- : ReadBufferFromRemoteFSGather(metadata_, settings_, path_)
+ : ReadBufferFromRemoteFSGather(common_path_prefix_, blobs_to_read_, settings_)
, client_ptr(std::move(client_ptr_))
, bucket(bucket_)
, max_single_read_retries(max_single_read_retries_)
@@ -130,13 +130,13 @@ class ReadBufferFromAzureBlobStorageGather final : public ReadBufferFromRemoteFS
{
public:
ReadBufferFromAzureBlobStorageGather(
- const String & path_,
std::shared_ptr blob_container_client_,
- IDiskRemote::Metadata metadata_,
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
size_t max_single_read_retries_,
size_t max_single_download_retries_,
const ReadSettings & settings_)
- : ReadBufferFromRemoteFSGather(metadata_, settings_, path_)
+ : ReadBufferFromRemoteFSGather(common_path_prefix_, blobs_to_read_, settings_)
, blob_container_client(blob_container_client_)
, max_single_read_retries(max_single_read_retries_)
, max_single_download_retries(max_single_download_retries_)
@@ -157,12 +157,12 @@ class ReadBufferFromWebServerGather final : public ReadBufferFromRemoteFSGather
{
public:
ReadBufferFromWebServerGather(
- const String & path_,
const String & uri_,
- RemoteMetadata metadata_,
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
ContextPtr context_,
const ReadSettings & settings_)
- : ReadBufferFromRemoteFSGather(metadata_, settings_, path_)
+ : ReadBufferFromRemoteFSGather(common_path_prefix_, blobs_to_read_, settings_)
, uri(uri_)
, context(context_)
{
@@ -182,12 +182,12 @@ class ReadBufferFromHDFSGather final : public ReadBufferFromRemoteFSGather
{
public:
ReadBufferFromHDFSGather(
- const String & path_,
const Poco::Util::AbstractConfiguration & config_,
const String & hdfs_uri_,
- IDiskRemote::Metadata metadata_,
+ const std::string & common_path_prefix_,
+ const BlobsPathToSize & blobs_to_read_,
const ReadSettings & settings_)
- : ReadBufferFromRemoteFSGather(metadata_, settings_, path_)
+ : ReadBufferFromRemoteFSGather(common_path_prefix_, blobs_to_read_, settings_)
, config(config_)
{
const size_t begin_of_path = hdfs_uri_.find('/', hdfs_uri_.find("//") + 2);
diff --git a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
index af545d15c0e..b1ae42d03d6 100644
--- a/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
+++ b/src/Disks/IO/ThreadPoolRemoteFSReader.cpp
@@ -54,14 +54,14 @@ std::future ThreadPoolRemoteFSReader::submit(Reques
{
ThreadStatus thread_status;
- /// Save query context if any, because cache implementation needs it.
- if (query_context)
- thread_status.attachQueryContext(query_context);
-
/// To be able to pass ProfileEvents.
if (running_group)
thread_status.attachQuery(running_group);
+ /// Save query context if any, because cache implementation needs it.
+ if (query_context)
+ thread_status.attachQueryContext(query_context);
+
setThreadName("VFSRead");
CurrentMetrics::Increment metric_increment{CurrentMetrics::Read};
@@ -83,12 +83,11 @@ std::future ThreadPoolRemoteFSReader::submit(Reques
watch.stop();
- if (running_group)
- CurrentThread::detachQuery();
-
ProfileEvents::increment(ProfileEvents::RemoteFSReadMicroseconds, watch.elapsedMicroseconds());
ProfileEvents::increment(ProfileEvents::RemoteFSReadBytes, result.offset ? result.size - result.offset : result.size);
+ thread_status.detachQuery(/* if_not_detached */true);
+
return Result{ .size = result.size, .offset = result.offset };
});
diff --git a/src/Disks/IO/WriteIndirectBufferFromRemoteFS.cpp b/src/Disks/IO/WriteIndirectBufferFromRemoteFS.cpp
index 9b604341da9..dca2fb17ba7 100644
--- a/src/Disks/IO/WriteIndirectBufferFromRemoteFS.cpp
+++ b/src/Disks/IO/WriteIndirectBufferFromRemoteFS.cpp
@@ -12,10 +12,10 @@ namespace DB
WriteIndirectBufferFromRemoteFS::WriteIndirectBufferFromRemoteFS(
std::unique_ptr impl_,
CreateMetadataCallback && create_callback_,
- const String & metadata_file_path_)
+ const String & remote_path_)
: WriteBufferFromFileDecorator(std::move(impl_))
, create_metadata_callback(std::move(create_callback_))
- , metadata_file_path(metadata_file_path_)
+ , remote_path(remote_path_)
{
}
diff --git a/src/Disks/IO/WriteIndirectBufferFromRemoteFS.h b/src/Disks/IO/WriteIndirectBufferFromRemoteFS.h
index 25a93e2fe07..84bd2b99c7e 100644
--- a/src/Disks/IO/WriteIndirectBufferFromRemoteFS.h
+++ b/src/Disks/IO/WriteIndirectBufferFromRemoteFS.h
@@ -18,17 +18,17 @@ public:
WriteIndirectBufferFromRemoteFS(
std::unique_ptr impl_,
CreateMetadataCallback && create_callback_,
- const String & metadata_file_path_);
+ const String & remote_path_);
~WriteIndirectBufferFromRemoteFS() override;
- String getFileName() const override { return metadata_file_path; }
+ String getFileName() const override { return remote_path; }
private:
void finalizeImpl() override;
CreateMetadataCallback create_metadata_callback;
- String metadata_file_path;
+ String remote_path;
};
}
diff --git a/src/Disks/RemoteDisksCommon.cpp b/src/Disks/RemoteDisksCommon.cpp
index 36f2aed3e7c..da6ffed5f11 100644
--- a/src/Disks/RemoteDisksCommon.cpp
+++ b/src/Disks/RemoteDisksCommon.cpp
@@ -2,6 +2,7 @@
#include
#include
#include
+#include