Merge remote-tracking branch 'ck/master' into hive_max_partition_query_check

lgbo-ustc 2022-05-17 15:42:10 +08:00
commit 4d64678963
147 changed files with 4904 additions and 1897 deletions

View File

@ -5,38 +5,21 @@ Checks: '*,
-android-*,
-bugprone-assert-side-effect,
-bugprone-branch-clone,
-bugprone-dynamic-static-initializers,
-bugprone-easily-swappable-parameters,
-bugprone-exception-escape,
-bugprone-forwarding-reference-overload,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-lambda-function-name,
-bugprone-misplaced-widening-cast,
-bugprone-narrowing-conversions,
-bugprone-no-escape,
-bugprone-not-null-terminated-result,
-bugprone-signal-handler,
-bugprone-spuriously-wake-up-functions,
-bugprone-suspicious-semicolon,
-bugprone-unhandled-exception-at-new,
-bugprone-unhandled-self-assignment,
-cert-dcl16-c,
-cert-dcl37-c,
-cert-dcl51-cpp,
-cert-dcl58-cpp,
-cert-err58-cpp,
-cert-err60-cpp,
-cert-msc32-c,
-cert-msc51-cpp,
-cert-oop54-cpp,
-cert-oop57-cpp,
-cert-oop58-cpp,
-clang-analyzer-core.DynamicTypePropagation,
-clang-analyzer-core.uninitialized.CapturedBlockVariable,
-clang-analyzer-optin.performance.Padding,
-clang-analyzer-optin.portability.UnixAPI,
@ -53,7 +36,6 @@ Checks: '*,
-fuchsia-*,
-google-build-using-namespace,
-google-global-names-in-headers,
-google-readability-braces-around-statements,
-google-readability-function-size,
-google-readability-namespace-comments,
@ -63,7 +45,6 @@ Checks: '*,
-hicpp-avoid-c-arrays,
-hicpp-avoid-goto,
-hicpp-braces-around-statements,
-hicpp-deprecated-headers,
-hicpp-explicit-conversions,
-hicpp-function-size,
-hicpp-invalid-access-moved,
@ -79,7 +60,6 @@ Checks: '*,
-hicpp-uppercase-literal-suffix,
-hicpp-use-auto,
-hicpp-use-emplace,
-hicpp-use-equals-default,
-hicpp-use-noexcept,
-hicpp-use-override,
-hicpp-vararg,
@ -90,40 +70,27 @@ Checks: '*,
-openmp-*,
-misc-definitions-in-headers,
-misc-new-delete-overloads,
-misc-no-recursion,
-misc-non-copyable-objects,
-misc-non-private-member-variables-in-classes,
-misc-static-assert,
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
-modernize-deprecated-headers,
-modernize-deprecated-ios-base-aliases,
-modernize-pass-by-value,
-modernize-replace-auto-ptr,
-modernize-replace-disallow-copy-and-assign-macro,
-modernize-return-braced-init-list,
-modernize-unary-static-assert,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-emplace,
-modernize-use-equals-default,
-modernize-use-nodiscard,
-modernize-use-noexcept,
-modernize-use-override,
-modernize-use-trailing-return-type,
-performance-inefficient-string-concatenation,
-performance-no-int-to-ptr,
-performance-type-promotion-in-math-fn,
-performance-trivially-destructible,
-performance-unnecessary-value-param,
-portability-simd-intrinsics,
-readability-convert-member-functions-to-static,
-readability-braces-around-statements,
-readability-else-after-return,
-readability-function-cognitive-complexity,
@ -131,9 +98,7 @@ Checks: '*,
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-misleading-indentation,
-readability-named-parameter,
-readability-qualified-auto,
-readability-redundant-declaration,
-readability-static-accessed-through-instance,
-readability-suspicious-call-argument,
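
For reference, every entry in the hunks above lives inside a single YAML string: in .clang-tidy, Checks enables everything with '*' and then disables individual checks or whole globs with a leading '-'. A minimal sketch of that layout (assumed shape, trimmed to two entries taken from the diff):

    Checks: '*,
        -android-*,
        -bugprone-assert-side-effect,
    '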

View File

@ -38,6 +38,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
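
The same two-line substitution is applied to the Cleanup step of every job in the workflows below: the quotes around the command substitutions are removed and each line gets a ShellCheck suppression. A minimal sketch of why, illustrative and not part of the commit:

    # Quoted, all container IDs come back as one newline-joined argument,
    # so the command fails as soon as more than one container is running:
    docker kill "$(docker ps -q)"    # Error: No such container: id1\nid2
    # Unquoted, word splitting passes each ID separately; the splitting is
    # intentional, so the SC2046 warning it triggers is suppressed:
    # shellcheck disable=SC2046
    docker kill $(docker ps -q) ||:  # "||:" swallows the error when no
                                     # containers are running at all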

View File

@ -112,8 +112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -159,8 +161,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -203,8 +207,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -247,8 +253,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -291,8 +299,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -335,8 +345,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -380,8 +392,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -418,8 +432,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -456,8 +472,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -497,8 +515,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -534,8 +554,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -122,8 +122,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
DocsCheck:
needs: DockerHubPush
@ -153,8 +155,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -116,6 +116,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -39,6 +39,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -112,8 +112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
CompatibilityCheck:
needs: [BuilderDebRelease]
@ -144,8 +146,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
SplitBuildSmokeTest:
needs: [BuilderDebSplitted]
@ -176,8 +180,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -225,8 +231,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -267,8 +275,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderPerformance:
needs: DockerHubPush
@ -313,8 +323,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinRelease:
needs: [DockerHubPush]
@ -359,8 +371,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
# BuilderBinGCC:
# needs: [DockerHubPush]
@ -403,8 +417,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -447,8 +463,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush]
@ -491,8 +509,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -535,8 +555,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush]
@ -579,8 +601,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -623,8 +647,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
##########################################################################################
##################################### SPECIAL BUILDS #####################################
@ -670,8 +696,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinTidy:
needs: [DockerHubPush]
@ -714,8 +742,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush]
@ -760,8 +790,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64:
needs: [DockerHubPush]
@ -806,8 +838,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinFreeBSD:
needs: [DockerHubPush]
@ -852,8 +886,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
@ -898,8 +934,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinPPC64:
needs: [DockerHubPush]
@ -944,8 +982,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
@ -972,8 +1012,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -1021,8 +1063,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderSpecialReport:
needs:
@ -1066,8 +1110,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -1104,8 +1150,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseOrdinary:
needs: [BuilderDebRelease]
@ -1139,8 +1187,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
@ -1174,8 +1224,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -1209,8 +1261,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -1246,8 +1300,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -1283,8 +1339,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -1320,8 +1378,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -1357,8 +1417,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -1394,8 +1456,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -1429,8 +1493,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -1466,8 +1532,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -1503,8 +1571,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -1540,8 +1610,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -1577,8 +1649,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -1614,8 +1688,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -1651,8 +1727,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1689,8 +1767,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestReleaseDatabaseOrdinary:
needs: [BuilderDebRelease]
@ -1724,8 +1804,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1759,8 +1841,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1794,8 +1878,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1829,8 +1915,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -1864,8 +1952,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -1899,8 +1989,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -1934,8 +2026,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -1971,8 +2065,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -2009,8 +2105,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -2043,8 +2141,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -2077,8 +2177,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -2111,8 +2213,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -2150,8 +2254,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -2186,8 +2292,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -2222,8 +2330,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -2258,8 +2368,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -2294,8 +2406,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -2330,8 +2444,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -2366,8 +2482,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -2402,8 +2520,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -2438,8 +2558,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### AST FUZZERS ############################################
@ -2475,8 +2597,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestTsan:
needs: [BuilderDebTsan]
@ -2509,8 +2633,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestUBSan:
needs: [BuilderDebUBsan]
@ -2543,8 +2669,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestMSan:
needs: [BuilderDebMsan]
@ -2577,8 +2705,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestDebug:
needs: [BuilderDebDebug]
@ -2611,8 +2741,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### UNIT TESTS #############################################
@ -2648,8 +2780,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsReleaseClang:
needs: [BuilderBinRelease]
@ -2682,8 +2816,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
# UnitTestsReleaseGCC:
# needs: [BuilderBinGCC]
@ -2716,8 +2852,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH"
UnitTestsTsan:
needs: [BuilderDebTsan]
@ -2750,8 +2888,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsMsan:
needs: [BuilderDebMsan]
@ -2784,8 +2924,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsUBsan:
needs: [BuilderDebUBsan]
@ -2818,8 +2960,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### PERFORMANCE TESTS ######################################
@ -2857,8 +3001,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison1:
needs: [BuilderPerformance]
@ -2893,8 +3039,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison2:
needs: [BuilderPerformance]
@ -2929,8 +3077,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison3:
needs: [BuilderPerformance]
@ -2965,8 +3115,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -118,6 +118,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"

View File

@ -137,8 +137,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FastTest:
needs: DockerHubPush
@ -171,8 +173,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
CompatibilityCheck:
needs: [BuilderDebRelease]
@ -203,8 +207,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
SplitBuildSmokeTest:
needs: [BuilderDebSplitted]
@ -235,8 +241,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -282,8 +290,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderPerformance:
needs: [DockerHubPush, FastTest]
@ -328,8 +338,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinRelease:
needs: [DockerHubPush, FastTest]
@ -372,8 +384,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
# BuilderBinGCC:
# needs: [DockerHubPush, FastTest]
@ -416,8 +430,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush, FastTest]
@ -460,8 +476,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush, FastTest]
@ -504,8 +522,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush, FastTest]
@ -548,8 +568,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush, FastTest]
@ -592,8 +614,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush, FastTest]
@ -636,8 +660,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush, FastTest]
@ -680,8 +706,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
##########################################################################################
##################################### SPECIAL BUILDS #####################################
@ -727,8 +755,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinTidy:
needs: [DockerHubPush, FastTest]
@ -771,8 +801,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush, FastTest]
@ -815,8 +847,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64:
needs: [DockerHubPush, FastTest]
@ -859,8 +893,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinFreeBSD:
needs: [DockerHubPush, FastTest]
@ -903,8 +939,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush, FastTest]
@ -947,8 +985,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinPPC64:
needs: [DockerHubPush, FastTest]
@ -991,8 +1031,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
@ -1019,8 +1061,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -1068,8 +1112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderSpecialReport:
needs:
@ -1114,8 +1160,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -1152,8 +1200,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated0:
needs: [BuilderDebRelease]
@ -1189,8 +1239,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated1:
needs: [BuilderDebRelease]
@ -1226,8 +1278,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseWideParts:
needs: [BuilderDebRelease]
@ -1261,8 +1315,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
@ -1296,8 +1352,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -1331,8 +1389,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -1368,8 +1428,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -1405,8 +1467,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -1442,8 +1506,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -1479,8 +1545,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -1516,8 +1584,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -1551,8 +1621,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -1588,8 +1660,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -1625,8 +1699,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -1662,8 +1738,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -1699,8 +1777,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -1736,8 +1816,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -1773,8 +1855,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestFlakyCheck:
needs: [BuilderDebAsan]
@ -1808,8 +1892,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
TestsBugfixCheck:
runs-on: [self-hosted, stress-tester]
@ -1853,8 +1939,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1891,8 +1979,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1926,8 +2016,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1961,8 +2053,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1996,8 +2090,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -2031,8 +2127,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -2066,8 +2164,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -2101,8 +2201,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -2138,8 +2240,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -2176,8 +2280,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -2210,8 +2316,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -2244,8 +2352,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -2278,8 +2388,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### AST FUZZERS ############################################
@ -2315,8 +2427,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestTsan:
needs: [BuilderDebTsan]
@ -2349,8 +2463,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestUBSan:
needs: [BuilderDebUBsan]
@ -2383,8 +2499,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestMSan:
needs: [BuilderDebMsan]
@ -2417,8 +2535,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestDebug:
needs: [BuilderDebDebug]
@ -2451,8 +2571,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -2490,8 +2612,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -2526,8 +2650,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -2562,8 +2688,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -2598,8 +2726,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -2634,8 +2764,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -2670,8 +2802,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -2706,8 +2840,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -2742,8 +2878,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -2778,8 +2916,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsFlakyCheck:
needs: [BuilderDebAsan]
@ -2812,8 +2952,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### UNIT TESTS #############################################
@ -2849,8 +2991,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsReleaseClang:
needs: [BuilderBinRelease]
@ -2883,8 +3027,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
# UnitTestsReleaseGCC:
# needs: [BuilderBinGCC]
@ -2917,8 +3063,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH"
UnitTestsTsan:
needs: [BuilderDebTsan]
@ -2951,8 +3099,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsMsan:
needs: [BuilderDebMsan]
@ -2985,8 +3135,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsUBsan:
needs: [BuilderDebUBsan]
@ -3019,8 +3171,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### PERFORMANCE TESTS ######################################
@ -3058,8 +3212,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison1:
needs: [BuilderPerformance]
@ -3094,8 +3250,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison2:
needs: [BuilderPerformance]
@ -3130,8 +3288,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison3:
needs: [BuilderPerformance]
@ -3166,8 +3326,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -58,6 +58,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -103,8 +103,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -152,8 +154,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -194,8 +198,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -238,8 +244,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush]
@ -282,8 +290,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -326,8 +336,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush]
@ -370,8 +382,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -414,8 +428,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -462,8 +478,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -500,8 +518,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -535,8 +555,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -572,8 +594,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -609,8 +633,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -646,8 +672,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -683,8 +711,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -720,8 +750,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -755,8 +787,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -792,8 +826,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -829,8 +865,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -866,8 +904,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -903,8 +943,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -940,8 +982,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -977,8 +1021,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1015,8 +1061,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1050,8 +1098,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1085,8 +1135,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1120,8 +1172,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -1155,8 +1209,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -1190,8 +1246,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -1225,8 +1283,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -1262,8 +1322,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -1300,8 +1362,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -1334,8 +1398,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -1368,8 +1434,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -1402,8 +1470,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -1441,8 +1511,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -1477,8 +1549,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -1513,8 +1587,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -1549,8 +1625,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -1585,8 +1663,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -1621,8 +1701,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -1657,8 +1739,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -1693,8 +1777,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -1729,8 +1815,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -37,6 +37,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -1,5 +1,5 @@
#include <string>
#include <string.h>
#include <cstring>
#include <Poco/UTF8Encoding.h>
#include <Poco/NumberParser.h>
@ -12,7 +12,7 @@
#define JSON_MAX_DEPTH 100
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException")
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException") // NOLINT(cert-err60-cpp, modernize-use-noexcept)
/// Read an unsigned integer in plain format from a non-null-terminated string.

View File

@ -5,7 +5,7 @@
#include <algorithm>
#include <cassert>
#include <string.h>
#include <cstring>
#include <unistd.h>
#include <sys/select.h>
#include <sys/time.h>

View File

@ -1,6 +1,6 @@
#include <base/demangle.h>
#include <stdlib.h>
#include <cstdlib>
#include <cxxabi.h>
static DemangleResult tryDemangle(const char * name, int & status)

View File

@ -3,7 +3,7 @@
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <errno.h>
#include <cerrno>
void * mremap_fallback(

View File

@ -169,9 +169,9 @@ obstacle to adoption, that text has been removed.
*/
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <cmath>
#include <cstdint>
#include <cstdio>
double preciseExp10(double x)
{

View File

@ -1,7 +1,7 @@
#include <base/sleep.h>
#include <time.h>
#include <errno.h>
#include <ctime>
#include <cerrno>
#if defined(OS_DARWIN)
#include <mach/mach.h>

View File

@ -19,7 +19,7 @@ The following tutorial is based on the Ubuntu Linux system. With appropriate cha
### Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
``` bash
$ sudo apt-get install git cmake python ninja-build
sudo apt-get install git cmake python ninja-build
```
Or use cmake3 instead of cmake on older systems.
@ -37,8 +37,8 @@ For other Linux distribution - check the availability of the [prebuild packages]
#### Use the latest clang for Builds
``` bash
$ export CC=clang-14
$ export CXX=clang++-14
export CC=clang-14
export CXX=clang++-14
```
In this example we use version 14, which is the latest as of February 2022.
GCC can also be used, though it is discouraged.
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```
or
``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```
### Build ClickHouse {#build-clickhouse}
``` bash
$ cd ClickHouse
$ mkdir build
$ cd build
$ cmake ..
$ ninja
cd ClickHouse
mkdir build
cd build
cmake ..
ninja
```
To create an executable, run `ninja clickhouse`.
@ -114,13 +114,13 @@ make -j $(nproc)
Here is an example of how to build `clang` and all the llvm infrastructure from sources:
```
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
```
You can install an older clang such as clang-11 from packages and then use it to build the new clang from sources.
@ -140,21 +140,21 @@ hash cmake
### Install Git {#install-git}
``` bash
$ sudo apt-get update
$ sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
sudo apt-get update
sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```
### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
``` bash
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
cd ClickHouse
```
### Run Release Script {#run-release-script}
``` bash
$ ./release
./release
```
## You Don't Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}

View File

@ -189,6 +189,8 @@ Example:
- `_timestamp` — Timestamp of the message.
- `_timestamp_ms` — Timestamp in milliseconds of the message.
- `_partition` — Partition of Kafka topic.
- `_headers.name` — Array of the message's header keys.
- `_headers.value` — Array of the message's header values.
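For illustration, a minimal hedged sketch of reading these virtual columns (the `kafka_queue` table name is hypothetical; virtual columns must be listed explicitly, since they are not returned by `SELECT *`):
``` sql
-- Hypothetical Kafka engine table named kafka_queue
SELECT _topic, _key, _headers.name, _headers.value
FROM kafka_queue
LIMIT 5;
```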
**See Also**

View File

@ -0,0 +1,76 @@
---
sidebar_position: 54
sidebar_label: JSON
---
# JSON {#json-data-type}
Stores JavaScript Object Notation (JSON) documents in a single column.
`JSON` is an alias for `Object('json')`.
:::warning
The JSON data type is an experimental feature. To use it, set `allow_experimental_object_type = 1`.
:::
## Example {#usage-example}
**Example 1**
Creating a table with a `JSON` column and inserting data into it:
```sql
CREATE TABLE json
(
o JSON
)
ENGINE = Memory
```
```sql
INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}')
```
```sql
SELECT o.a, o.b.c, o.b.d[3] FROM json
```
```text
┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┐
│ 1 │ 2 │ 3 │
└─────┴───────┴────────────────────────┘
```
**Example 2**
To be able to create an ordered `MergeTree` family table, the sorting key has to be extracted into its own column. For example, to insert a file of compressed HTTP access logs in JSON format:
```sql
CREATE TABLE logs
(
timestamp DateTime,
message JSON
)
ENGINE = MergeTree
ORDER BY timestamp
```
```sql
INSERT INTO logs
SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json
FROM file('access.json.gz', JSONAsString)
```
## Displaying JSON columns
When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally, it is represented as a tuple). You can display the field names as well by setting `output_format_json_named_tuples_as_objects = 1`:
```sql
SET output_format_json_named_tuples_as_objects = 1
SELECT * FROM json FORMAT JSONEachRow
```
```text
{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}}
```

View File

@ -12,7 +12,7 @@ The following operations are available:
- `ALTER TABLE [db].name DROP INDEX name` - Removes the index description from the table's metadata and deletes the index files from disk.
- `ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name` - The query rebuilds the secondary index `name` in the partition `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations). To rebuild index over the whole data in the table you need to remove `IN PARTITION` from query.
- `ALTER TABLE [db.]table MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
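For instance, a minimal sketch (the table name `t`, index name `idx1`, and partition value are hypothetical):
``` sql
ALTER TABLE t MATERIALIZE INDEX idx1;                         -- rebuild over the whole table
ALTER TABLE t MATERIALIZE INDEX idx1 IN PARTITION '2022-05';  -- rebuild one partition only
```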
The first two commands are lightweight in the sense that they only change metadata or remove files.
Also, they are replicated, syncing index metadata via ZooKeeper.
:::note
Index manipulation is supported only for tables with [`*MergeTree`](../../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../../engines/table-engines/mergetree-family/replication.md) variants).
:::
:::

View File

@ -104,7 +104,7 @@ There are many nuances to processing `NULL`. For example, if at least one of the
In queries, you can check `NULL` using the [IS NULL](../sql-reference/operators/index.md#operator-is-null) and [IS NOT NULL](../sql-reference/operators/index.md) operators and the related functions `isNull` and `isNotNull`.
### Heredoc {#heredeoc}
### Heredoc {#heredoc}
A [heredoc](https://en.wikipedia.org/wiki/Here_document) is a way to define a string (often multiline), while maintaining the original formatting. A heredoc is defined as a custom string literal, placed between two `$` symbols, for example `$heredoc$`. A value between two heredocs is processed "as-is".
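For example, a small sketch (the `$doc$` tag is an arbitrary placeholder):
``` sql
SELECT $doc$String with 'quotes' and
a line break$doc$ AS s;
```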

View File

@ -102,7 +102,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
In queries, you can check `NULL` using the [IS NULL](operators/index.md#operator-is-null) and [IS NOT NULL](operators/index.md) operators and the related functions `isNull` and `isNotNull`.
### Heredoc {#heredeoc}
### Heredoc {#heredoc}
The [heredoc](https://ru.wikipedia.org/wiki/Heredoc-синтаксис) syntax is a way to define strings while preserving the original format (often with line breaks). A heredoc is defined as an arbitrary string literal between two `$` symbols, for example `$heredoc$`. A value between two heredocs is processed "as is".

View File

@ -4,26 +4,437 @@
The result type is an integer whose number of bits equals the maximum bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
## bitAnd(a,b) {#bitanda-b}
## bitAnd(a, b) {#bitanda-b}
## bitOr(a,b) {#bitora-b}
## bitOr(a, b) {#bitora-b}
## bitXor(a,b) {#bitxora-b}
## bitXor(a, b) {#bitxora-b}
## bitNot(a) {#bitnota}
## bitShiftLeft(a,b) {#bitshiftlefta-b}
## bitShiftLeft(a, b) {#bitshiftlefta-b}
## bitShiftRight(a,b) {#bitshiftrighta-b}
Shifts the binary representation of a value to the left by the specified number of bits.
## bitRotateLeft(a,b) {#bitrotatelefta-b}
A `FixedString` or `String` is treated as a single multi-byte value.
## bitRotateRight(a,b) {#bitrotaterighta-b}
Bits of a `FixedString` value are lost as they are shifted out. In contrast, a `String` value is extended with additional bytes, so no bits are lost.
## bitTest(a,b) {#bittesta-b}
**Syntax**
## bitTestAll(a,b) {#bittestalla-b}
``` sql
bitShiftLeft(a, b)
```
**Arguments**
- `a` — Value to shift. Type: [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — Number of positions to shift. Type: [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit and narrower numeric types are allowed.
**Returned value**
- Shifted value.
The returned value has the same type as the input value.
**Example**
In the following queries, the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions are used to show the bits of the shifted values.
``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```
Result:
``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │ 140 │ 10001100 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263 │ &0 │ 06162630 │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263 │ &0 │ 162630 │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```
## bitShiftRight(a, b) {#bitshiftrighta-b}
Shifts the binary representation of a value to the right by the specified number of bits.
A `FixedString` or `String` is treated as a single multi-byte value. Note that the length of a `String` value decreases as bits are shifted out.
**Syntax**
``` sql
bitShiftRight(a, b)
```
**Arguments**
- `a` — Value to shift. Type: [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — Number of positions to shift. Type: [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit and narrower numeric types are allowed.
**Returned value**
- Shifted value.
The returned value has the same type as the input value.
**Example**
Query:
``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```
Result:
``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │ 25 │ 00011001 │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263 │ │ 0616 │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263 │ │ 000616 │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```
## bitRotateLeft(a, b) {#bitrotatelefta-b}
## bitRotateRight(a, b) {#bitrotaterighta-b}
## bitSlice(s, offset, length)
Returns a substring starting at the bit indexed by `offset` and `length` bits long. Bit indexing starts from 1.
**Syntax**
``` sql
bitSlice(s, offset[, length])
```
**Arguments**
- `s` — Type: [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `offset` — Starting bit index; a positive value indicates an offset from the left, a negative value an indent from the right. Bit numbering starts from 1.
- `length` — Length of the substring in bits. If you specify a negative value, the function returns an open substring \[offset, array_length - length\]. If you omit the value, the function returns the substring \[offset, the_end_string\]. If `length` exceeds `s`, it is truncated. If `length` is not a multiple of 8, zeros are padded on the right.
**Returned value**
- The substring. Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
select bin('Hello'), bin(bitSlice('Hello', 1, 8))
select bin('Hello'), bin(bitSlice('Hello', 1, 2))
select bin('Hello'), bin(bitSlice('Hello', 1, 9))
select bin('Hello'), bin(bitSlice('Hello', -4, 8))
```
Result:
``` text
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 8))─┐
│ 0100100001100101011011000110110001101111 │ 01001000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 2))─┐
│ 0100100001100101011011000110110001101111 │ 01000000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 9))─┐
│ 0100100001100101011011000110110001101111 │ 0100100000000000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', -4, 8))─┐
│ 0100100001100101011011000110110001101111 │ 11110000 │
└──────────────────────────────────────────┴───────────────────────────────┘
```
## bitTest {#bittest}
Takes any integer, converts it to [binary form](https://en.wikipedia.org/wiki/Binary_number), and returns the value of the bit at the specified position. Bits are counted from right to left, starting from 0.
**Syntax**
``` sql
SELECT bitTest(number, index)
```
**Arguments**
- `number` – An integer.
- `index` – The position of the bit to get.
**Returned value**
Returns the value of the bit at the specified position.
Type: `UInt8`.
**Example**
For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTest(43, 1);
```
Result:
``` text
┌─bitTest(43, 1)─┐
│ 1 │
└────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTest(43, 2);
```
Result:
``` text
┌─bitTest(43, 2)─┐
│ 0 │
└────────────────┘
```
## bitTestAll {#bittestall}
Returns the result of the [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operation) of all bits at the given positions. Bits are counted from right to left, starting from 0.
Result of the AND operation:
0 AND 0 = 0
0 AND 1 = 0
1 AND 0 = 0
1 AND 1 = 1
**Syntax**
``` sql
SELECT bitTestAll(number, index1, index2, index3, index4, ...)
```
**Arguments**
- `number` – An integer.
- `index1`, `index2`, `index3`, `index4` – Positions of bits. For example, the set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2` ⋀ `index3` ⋀ `index4`).
**Returned value**
Returns the result of the logical conjunction.
Type: `UInt8`.
**Example**
For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5);
```
Result:
``` text
┌─bitTestAll(43, 0, 1, 3, 5)─┐
│ 1 │
└────────────────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5, 2);
```
Result:
``` text
┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐
│ 0 │
└───────────────────────────────┘
```
## bitTestAny {#bittestany}
Returns the result of the [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR operation) of all bits at the given positions. Bits are counted from right to left, starting from 0.
Result of the OR operation:
0 OR 0 = 0
0 OR 1 = 1
1 OR 0 = 1
1 OR 1 = 1
**Syntax**
``` sql
SELECT bitTestAny(number, index1, index2, index3, index4, ...)
```
**Arguments**
- `number` – An integer.
- `index1`, `index2`, `index3`, `index4` – Positions of bits.
**Returned value**
Returns the result of the logical disjunction.
Type: `UInt8`.
**Example**
For example, the number 43 in base-2 (binary) numeral system is 101011.
Query:
``` sql
SELECT bitTestAny(43, 0, 2);
```
Result:
``` text
┌─bitTestAny(43, 0, 2)─┐
│ 1 │
└──────────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTestAny(43, 4, 2);
```
Result:
``` text
┌─bitTestAny(43, 4, 2)─┐
│ 0 │
└──────────────────────┘
```
## bitCount {#bitcount}
Calculates the number of bits set to one in the binary representation of a number.
**Syntax**
``` sql
bitCount(x)
```
**Arguments**
- `x` — A number of [Integer](../../sql-reference/data-types/int-uint.md) or [floating-point](../../sql-reference/data-types/float.md) type. The function uses the in-memory value representation, which makes it possible to support floating-point numbers.
**Returned value**
- The number of bits set to one in the input number.
The function does not convert the input value to a larger type ([sign extension](https://en.wikipedia.org/wiki/Sign_extension)). So, for example, `bitCount(toUInt8(-1)) = 8`.
Type: `UInt8`.
**Example**
Take, for example, the number 333. Its binary representation is 0000000101001101.
Query:
``` sql
SELECT bitCount(333);
```
Result:
``` text
┌─bitCount(333)─┐
│ 5 │
└───────────────┘
```
## bitHammingDistance {#bithammingdistance}
Returns the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between the bit representations of two integer values. Can be used with [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) functions to detect semi-duplicate strings. The smaller the distance, the more likely the strings are the same.
**Syntax**
``` sql
bitHammingDistance(int1, int2)
```
**Arguments**
- `int1` — The first integer value. Type: [Int64](../../sql-reference/data-types/int-uint.md).
- `int2` — The second integer value. Type: [Int64](../../sql-reference/data-types/int-uint.md).
**Returned value**
- The Hamming distance.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT bitHammingDistance(111, 121);
```
Result:
``` text
┌─bitHammingDistance(111, 121)─┐
│ 3 │
└──────────────────────────────┘
```
With the [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) function:
``` sql
SELECT bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'));
```
Result:
``` text
┌─bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'))─┐
│ 5 │
└──────────────────────────────────────────────────────────────────────────────┘
```
## bitTestAny(a,b) {#bittestanya-b}
[Original article](https://clickhouse.com/docs/en/query_language/functions/bit_functions/) <!--hide-->

View File

@ -12,6 +12,8 @@ SELECT if(cond, then, else)
If the condition `cond` evaluates to a non-zero value, the function returns the result of the expression `then`, and the result of the expression `else` (if present) is skipped. If `cond` is zero or `NULL`, the result of the `then` expression is skipped and the result of the `else` expression (if present) is returned.
You can use the [short_circuit_function_evaluation](../../operations/settings/settings.md#short-circuit-function-evaluation) setting to evaluate the `if` function according to a short-circuit scheme. If this setting is enabled, the `then` expression is evaluated only on rows where `cond` is true, and the `else` expression only on rows where `cond` is false. For example, executing the query `SELECT if(number = 0, 0, intDiv(42, number)) FROM numbers(10)` does not throw a division-by-zero exception, because `intDiv(42, number)` is processed only for numbers that do not satisfy the condition `number = 0`.
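The query mentioned above can be run as-is as a quick check; with short-circuit evaluation enabled it completes without a division-by-zero error:
``` sql
SET short_circuit_function_evaluation = 'enable';
SELECT if(number = 0, 0, intDiv(42, number)) FROM numbers(10);
```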
**Arguments**
- `cond` – The evaluated condition, which may be zero or non-zero. Type: UInt8, Nullable(UInt8), or NULL.
@ -102,11 +104,21 @@ WHERE isNotNull(left) AND isNotNull(right)
- `then` and `else` can be `NULL`
**See also**
- [ifNotFinite](../../sql-reference/functions/other-functions.md#ifnotfinite).
## multiIf {#multiif}
Allows writing the [CASE](../operators/index.md#operator_case) operator more compactly in a query.
multiIf(cond_1, then_1, cond_2, then_2...else)
**Syntax**
``` sql
multiIf(cond_1, then_1, cond_2, then_2, ..., else)
```
You can use the [short_circuit_function_evaluation](../../operations/settings/settings.md#short-circuit-function-evaluation) setting to evaluate the `multiIf` function according to a short-circuit scheme. If this setting is enabled, the `then_i` expression is evaluated only on rows where `((NOT cond_1) AND (NOT cond_2) AND ... AND (NOT cond_{i-1}) AND cond_i)` is true, and `cond_i` is evaluated only on rows where `((NOT cond_1) AND (NOT cond_2) AND ... AND (NOT cond_{i-1}))` is true. For example, executing the query `SELECT multiIf(number = 2, intDiv(1, number), number = 5) FROM numbers(10)` does not throw a division-by-zero exception.
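Likewise, the query from the paragraph above, runnable as a quick check:
``` sql
SET short_circuit_function_evaluation = 'enable';
SELECT multiIf(number = 2, intDiv(1, number), number = 5) FROM numbers(10);
```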
**Arguments:**

View File

@ -12,16 +12,61 @@ SELECT
toString(time, 'US/Samoa') AS time_samoa
```
┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐
│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │
└─────────────────────┴────────────┴────────────┴─────────────────────┘
``` text
┌────────────────time─┬─date_local─┬─date_yekat─┬─time_samoa──────────┐
│ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-16 │ 2016-06-15 09:00:00 │
└─────────────────────┴────────────┴────────────┴─────────────────────┘
```
Only time zones that differ from UTC by a whole number of hours are supported.
## timeZone {#timezone}
Returns the time zone of the server.
If executed in the context of a distributed table, it generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
**Syntax**
``` sql
timeZone()
```
Alias: `timezone`.
**Returned value**
- Time zone.
Type: [String](../../sql-reference/data-types/string.md).
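**Example**
A quick sketch; the returned value depends on the server configuration:
``` sql
SELECT timeZone();  -- e.g. Europe/Moscow
```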
## toTimeZone {#totimezone}
Converts a Date or DateTime to the specified time zone. The time zone is an attribute of the Date/DateTime types. The internal value (number of seconds) of the table field or of the result-set column does not change; the column type changes, and its string representation changes accordingly.
**Syntax**
``` sql
toTimezone(value, timezone)
```
Alias: `toTimezone`.
**Arguments**
- `value` — Time or date and time. Type: [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — Time zone of the returned value. Type: [String](../../sql-reference/data-types/string.md). This argument is a constant, because `toTimezone` changes the time zone of a column (a time zone is an attribute of the `DateTime` type).
**Returned value**
- Date and time.
Type: [DateTime](../../sql-reference/data-types/datetime.md).
**Example**
Query:
```sql
SELECT
toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
@ -52,44 +97,138 @@ int32samoa: 1546300800
`toTimeZone(time_utc, 'Asia/Yekaterinburg')` converts the `DateTime('UTC')` type to `DateTime('Asia/Yekaterinburg')`. The internal value (Unix timestamp) 1546300800 stays the same, but the string representation (the result of the toString() function) changes from `time_utc: 2019-01-01 00:00:00` to `time_yekat: 2019-01-01 05:00:00`.
## timeZoneOf {#timezoneof}
Returns the time zone name of the [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md) data types.
**Syntax**
``` sql
timeZoneOf(value)
```
Alias: `timezoneOf`.
**Arguments**
- `value` — Date and time. Type: [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value**
- Time zone name.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
SELECT timezoneOf(now());
```
Result:
``` text
┌─timezoneOf(now())─┐
│ Etc/UTC │
└───────────────────┘
```
## timeZoneOffset {#timezoneoffset}
Returns the time zone offset from [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time) in seconds. The function takes [daylight saving time](https://en.wikipedia.org/wiki/Daylight_saving_time) and historical time zone changes at the specified date and time into account.
The [IANA timezone database](https://www.iana.org/time-zones) is used to calculate the offset.
**Syntax**
``` sql
timeZoneOffset(value)
```
Alias: `timezoneOffset`.
**Arguments**
- `value` — Date and time. Type: [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value**
- Offset from UTC in seconds.
Type: [Int32](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT toDateTime('2021-04-21 10:20:30', 'America/New_York') AS Time, toTypeName(Time) AS Type,
timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours;
```
Result:
``` text
┌────────────────Time─┬─Type─────────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐
│ 2021-04-21 10:20:30 │ DateTime('America/New_York') │ -14400 │ -4 │
└─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘
```
## toYear {#toyear}
Converts a Date or DateTime to a UInt16 number containing the year number (AD).
Alias: `YEAR`.
## toQuarter {#toquarter}
Converts a Date or DateTime to a UInt8 number containing the quarter number.
Alias: `QUARTER`.
## toMonth {#tomonth}
Converts a Date or DateTime to a UInt8 number containing the month number (1-12).
Alias: `MONTH`.
## toDayOfYear {#todayofyear}
Converts a Date or DateTime to a UInt16 number containing the number of the day of the year (1-366).
Alias: `DAYOFYEAR`.
## toDayOfMonth {#todayofmonth}
Converts a Date or DateTime to a UInt8 number containing the number of the day of the month (1-31).
Aliases: `DAYOFMONTH`, `DAY`.
## toDayOfWeek {#todayofweek}
Converts a Date or DateTime to a UInt8 number containing the number of the day of the week (Monday is 1, Sunday is 7).
Alias: `DAYOFWEEK`.
## toHour {#tohour}
Converts a DateTime to a UInt8 number containing the hour in 24-hour time (0-23).
This function assumes that if clocks are moved forward, it is by one hour and occurs at 2 a.m., and if clocks are moved back, it is by one hour and occurs at 3 a.m. (which is not always exactly true; even in Moscow the clocks were twice changed at a different time).
Alias: `HOUR`.
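For example (a timestamp well away from any DST transition):
``` sql
SELECT toHour(toDateTime('2016-06-15 23:00:00'));  -- returns 23
```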
## toMinute {#tominute}
Converts a DateTime to a UInt8 number containing the minute of the hour (0-59).
Alias: `MINUTE`.
## toSecond {#tosecond}
Converts a DateTime to a UInt8 number containing the second in the minute (0-59).
Leap seconds are not counted.
Alias: `SECOND`.
## toUnixTimestamp {#to-unix-timestamp}
For a DateTime argument: converts the value to a UInt32 number - a Unix timestamp (https://en.wikipedia.org/wiki/Unix_time).
@ -124,6 +263,10 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
└────────────────┘
```
:::note
The return type of the `toStartOf` functions described below is `Date` or `DateTime`. Although these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1925-2283) gives an incorrect result.
:::
## toStartOfYear {#tostartofyear}
Rounds a Date or DateTime down to the first day of the year.
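For example:
``` sql
SELECT toStartOfYear(toDate('2017-11-05'));  -- returns 2017-01-01
```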
@ -429,6 +572,263 @@ SELECT now(), date_trunc('hour', now(), 'Asia/Istanbul');
- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)
## date_add {#date_add}
Adds a time interval or date interval to the provided date or date with time.
**Syntax**
``` sql
date_add(unit, value, date)
```
Aliases: `dateAdd`, `DATE_ADD`.
**Arguments**
- `unit` — The unit of time in which `value` is expressed. Type: [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — The amount of interval to add. Type: [Int](../../sql-reference/data-types/int-uint.md).
- `date` — The date or date with time to which `value` is added. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
Date or date with time obtained by adding `value`, expressed in `unit`, to `date`.
Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Example**
Query:
```sql
SELECT date_add(YEAR, 3, toDate('2018-01-01'));
```
Result:
```text
┌─plus(toDate('2018-01-01'), toIntervalYear(3))─┐
│ 2021-01-01 │
└───────────────────────────────────────────────┘
```
## date_diff {#date_diff}
Returns the difference between two dates or dates with time values.
**Syntax**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
Aliases: `dateDiff`, `DATE_DIFF`.
**Arguments**
- `unit` — The unit of time in which the result is expressed. Type: [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `startdate` — The first time value to subtract (the subtrahend). Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `enddate` — The second time value to subtract from (the minuend). Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. Type: [String](../../sql-reference/data-types/string.md).
**Returned value**
Difference between `enddate` and `startdate`, expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
## date_sub {#date_sub}
Subtracts a time interval or date interval from the provided date or date with time.
**Syntax**
``` sql
date_sub(unit, value, date)
```
Aliases: `dateSub`, `DATE_SUB`.
**Arguments**
- `unit` — The unit of time in which `value` is expressed. Type: [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — The amount of interval to subtract. Type: [Int](../../sql-reference/data-types/int-uint.md).
- `date` — The date or date with time from which `value` is subtracted. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
Date or date with time obtained by subtracting `value`, expressed in `unit`, from `date`.
Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Example**
Query:
``` sql
SELECT date_sub(YEAR, 3, toDate('2018-01-01'));
```
Result:
``` text
┌─minus(toDate('2018-01-01'), toIntervalYear(3))─┐
│ 2015-01-01 │
└────────────────────────────────────────────────┘
```
## timestamp_add {#timestamp_add}
Adds the specified time value to the provided date or date-time value.
**Syntax**
``` sql
timestamp_add(date, INTERVAL value unit)
```
Aliases: `timeStampAdd`, `TIMESTAMP_ADD`.
**Arguments**
- `date` — Date or date with time. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `value` — The amount of interval to add. Type: [Int](../../sql-reference/data-types/int-uint.md).
- `unit` — The unit of time in which `value` is expressed. Type: [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
**Returned value**
Date or date with time obtained by adding `value`, expressed in `unit`, to `date`.
Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Example**
Query:
```sql
select timestamp_add(toDate('2018-01-01'), INTERVAL 3 MONTH);
```
Result:
```text
┌─plus(toDate('2018-01-01'), toIntervalMonth(3))─┐
│ 2018-04-01 │
└────────────────────────────────────────────────┘
```
## timestamp_sub {#timestamp_sub}
Subtracts a time interval from the provided date or date with time.
**Syntax**
``` sql
timestamp_sub(unit, value, date)
```
Aliases: `timeStampSub`, `TIMESTAMP_SUB`.
**Arguments**
- `unit` — The unit of time in which `value` is expressed. Type: [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `value` — The amount of interval to subtract. Type: [Int](../../sql-reference/data-types/int-uint.md).
- `date` — Date or date with time. Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
Date or date with time obtained by subtracting `value`, expressed in `unit`, from `date`.
Type: [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
**Example**
Query:
```sql
select timestamp_sub(MONTH, 5, toDateTime('2018-12-18 01:02:03'));
```
Result:
```text
┌─minus(toDateTime('2018-12-18 01:02:03'), toIntervalMonth(5))─┐
│ 2018-07-18 01:02:03 │
└──────────────────────────────────────────────────────────────┘
```
## now {#now}
Returns the current date and time.
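For example (the value is computed once per query, so it is constant within a single statement; the timestamp shown is illustrative):
``` sql
SELECT now();  -- e.g. 2022-05-17 15:42:10
```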
@ -540,50 +940,6 @@ SELECT
│ 2018-01-01 │ 2018-01-01 00:00:00 │
└──────────────────────────┴───────────────────────────────┘
## dateDiff {#datediff}
Returns the time difference between two Date or DateTime values.
**Syntax**
``` sql
dateDiff('unit', startdate, enddate, [timezone])
```
**Arguments**
- `unit` — The unit of time for the returned result. [String](../../sql-reference/syntax.md#syntax-string-literal).
Supported time units: second, minute, hour, day, week, month, quarter, year.
- `startdate` — The first value to compare. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `enddate` — The second value to compare. [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `timezone` — Optional argument. If specified, it applies to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used. If the two time zones differ, the result is unpredictable.
**Returned value**
The difference between `startdate` and `enddate`, expressed in `unit`.
Type: `int`.
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
It returns an array of time points that includes all points from 'StartTime' up to 'StartTime + Duration seconds', aligned to the 'Size' step (in seconds). 'Size' is an optional argument, set to 1800 by default.
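For example, a sketch with the default 1800-second grid (the expected result follows from the description above and may be double-checked against a live server):
``` sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
-- expected: ['2012-01-01 12:00:00','2012-01-01 12:30:00']
```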
@ -652,7 +1008,44 @@ SELECT formatDateTime(toDate('2010-01-04'), '%g')
└────────────────────────────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->
## dateName {#dataname}
Returns the specified part of a date.
**Syntax**
``` sql
dateName(date_part, date)
```
**Arguments**
- `date_part` — Part of the date. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. Type: [String](../../sql-reference/data-types/string.md).
- `date` — Date. Type: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — Time zone (optional). Type: [String](../../sql-reference/data-types/string.md).
**Returned value**
- The specified part of the date.
Type: [String](../../sql-reference/data-types/string.md#string).
**Example**
Query:
```sql
WITH toDateTime('2021-04-14 11:22:33') AS date_value
SELECT dateName('year', date_value), dateName('month', date_value), dateName('day', date_value);
```
Result:
```text
┌─dateName('year', date_value)─┬─dateName('month', date_value)─┬─dateName('day', date_value)─┐
│ 2021 │ April │ 14 │
└──────────────────────────────┴───────────────────────────────┴─────────────────────────────┘
```
## FROM_UNIXTIME
@ -683,3 +1076,149 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```
## toModifiedJulianDay {#tomodifiedjulianday}
Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports dates from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or if the date is invalid.
**Syntax**
``` sql
toModifiedJulianDay(date)
```
**Arguments**
- `date` — Date in text form. Type: [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
**Returned value**
- Modified Julian Day number.
Type: [Int32](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT toModifiedJulianDay('2020-01-01');
```
Result:
``` text
┌─toModifiedJulianDay('2020-01-01')─┐
│ 58849 │
└───────────────────────────────────┘
```
## toModifiedJulianDayOrNull {#tomodifiedjuliandayornull}
Similar to [toModifiedJulianDay()](#tomodifiedjulianday), but instead of raising an exception it returns `NULL`.
**Syntax**
``` sql
toModifiedJulianDayOrNull(date)
```
**Arguments**
- `date` — Date in text form. Type: [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
**Returned value**
- Modified Julian Day number.
Type: [Nullable(Int32)](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT toModifiedJulianDayOrNull('2020-01-01');
```
Result:
``` text
┌─toModifiedJulianDayOrNull('2020-01-01')─┐
│ 58849 │
└─────────────────────────────────────────┘
```
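For a date that cannot be parsed or does not exist, the function returns `NULL` instead of throwing. A minimal sketch (`2020-02-30` is not a valid date; how `NULL` is rendered depends on the client):

``` sql
SELECT toModifiedJulianDayOrNull('2020-02-30');
```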
## fromModifiedJulianDay {#frommodifiedjulianday}
Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a date in text form `YYYY-MM-DD` in the [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar). This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
**Syntax**
``` sql
fromModifiedJulianDay(day)
```
**Arguments**
- `day` — The Modified Julian Day number to convert. Any [integral type](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Date in text form.
Type: [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
SELECT fromModifiedJulianDay(58849);
```
Result:
``` text
┌─fromModifiedJulianDay(58849)─┐
│ 2020-01-01 │
└──────────────────────────────┘
```
## fromModifiedJulianDayOrNull {#frommodifiedjuliandayornull}
Similar to [fromModifiedJulianDay()](#frommodifiedjulianday), but instead of raising an exception it returns `NULL` (see the sketch after the example below).
**Syntax**
``` sql
fromModifiedJulianDayOrNull(day)
```
**Arguments**
- `day` — The Modified Julian Day number to convert. Any [integral type](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Date in text form.
Type: [Nullable(String)](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
SELECT fromModifiedJulianDayOrNull(58849);
```
Result:
``` text
┌─fromModifiedJulianDayOrNull(58849)─┐
│ 2020-01-01 │
└────────────────────────────────────┘
```
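For a day number outside the supported range the function returns `NULL` instead of throwing. A minimal sketch (`3000000` is above the `2973119` upper limit):

``` sql
SELECT fromModifiedJulianDayOrNull(3000000);
```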
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->

View File

@ -1,8 +1,8 @@
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <csignal>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iomanip>

View File

@ -1,4 +1,4 @@
#include <stdlib.h>
#include <cstdlib>
#include <fcntl.h>
#include <map>
#include <iostream>

View File

@ -1,5 +1,5 @@
#include <signal.h>
#include <setjmp.h>
#include <csignal>
#include <csetjmp>
#include <unistd.h>
#ifdef __linux__

View File

@ -5,7 +5,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <cerrno>
#include <pwd.h>
#include <unistd.h>
#include <Poco/Version.h>

View File

@ -129,8 +129,8 @@
#query_div
{
/* Make enough space for medium/large queries but allowing query textarea to grow. */
min-height: 20%;
/* Make enough space for even huge queries. */
height: 20%;
}
#query

View File

@ -17,7 +17,7 @@
#include <Common/logger_useful.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/range/algorithm/set_algorithm.hpp>
#include <assert.h>
#include <cassert>
namespace DB

View File

@ -14,6 +14,7 @@
#include <AggregateFunctions/IAggregateFunction.h>
#include <Common/config.h>
#include <Common/TargetSpecific.h>
#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>
@ -58,8 +59,11 @@ struct AggregateFunctionSumData
}
/// Vectorized version
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(addManyImpl,
MULTITARGET_FH(
template <typename Value>
void NO_SANITIZE_UNDEFINED NO_INLINE addMany(const Value * __restrict ptr, size_t start, size_t end)
void NO_SANITIZE_UNDEFINED NO_INLINE
), /*addManyImpl*/ MULTITARGET_FB((const Value * __restrict ptr, size_t start, size_t end) /// NOLINT
{
ptr += start;
size_t count = end - start;
@ -95,11 +99,34 @@ struct AggregateFunctionSumData
++ptr;
}
Impl::add(sum, local_sum);
})
)
/// Vectorized version
template <typename Value>
void NO_INLINE addMany(const Value * __restrict ptr, size_t start, size_t end)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
addManyImplAVX2(ptr, start, end);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
addManyImplSSE42(ptr, start, end);
return;
}
#endif
addManyImpl(ptr, start, end);
}
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(addManyConditionalInternalImpl,
MULTITARGET_FH(
template <typename Value, bool add_if_zero>
void NO_SANITIZE_UNDEFINED NO_INLINE
addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
), /*addManyConditionalInternalImpl*/ MULTITARGET_FB((const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) /// NOLINT
{
ptr += start;
size_t count = end - start;
@ -163,6 +190,27 @@ struct AggregateFunctionSumData
++condition_map;
}
Impl::add(sum, local_sum);
})
)
/// Vectorized version
template <typename Value, bool add_if_zero>
void NO_INLINE addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
addManyConditionalInternalImplAVX2<Value, add_if_zero>(ptr, condition_map, start, end);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
addManyConditionalInternalImplSSE42<Value, add_if_zero>(ptr, condition_map, start, end);
return;
}
#endif
addManyConditionalInternalImpl<Value, add_if_zero>(ptr, condition_map, start, end);
}
template <typename Value>

View File

@ -540,7 +540,7 @@ public:
Arena * arena)
const override
{
size_t current_offset = 0;
size_t current_offset = offsets[static_cast<ssize_t>(row_begin) - 1];
for (size_t i = row_begin; i < row_end; ++i)
{
size_t next_offset = offsets[i];

View File

@ -103,8 +103,9 @@ class QuantileTDigest
*/
static Value interpolate(Value x, Value x1, Value y1, Value x2, Value y2)
{
/// Symmetric interpolation for better results with infinities.
double k = (x - x1) / (x2 - x1);
return y1 + k * (y2 - y1);
return (1 - k) * y1 + k * y2;
}
struct RadixSortTraits
@ -137,6 +138,11 @@ class QuantileTDigest
compress();
}
inline bool canBeMerged(const BetterFloat & l_mean, const Value & r_mean)
{
return l_mean == r_mean || (!std::isinf(l_mean) && !std::isinf(r_mean));
}
void compressBrute()
{
if (centroids.size() <= params.max_centroids)
@ -149,13 +155,17 @@ class QuantileTDigest
BetterFloat l_mean = l->mean; // We have high-precision temporaries for numeric stability
BetterFloat l_count = l->count;
size_t batch_pos = 0;
for (;r != centroids.end(); ++r)
for (; r != centroids.end(); ++r)
{
if (batch_pos < batch_size - 1)
{
/// The left column "eats" the right. Middle of the batch
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
if (r->mean != l_mean) /// Handling infinities of the same sign well.
{
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
}
l->mean = l_mean;
l->count = l_count;
batch_pos += 1;
@ -163,8 +173,11 @@ class QuantileTDigest
else
{
// End of the batch, start the next one
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
if (!std::isnan(l->mean)) /// Skip writing batch result if we compressed something to nan.
{
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
}
/// We skip all the values "eaten" earlier.
*l = *r;
@ -173,8 +186,17 @@ class QuantileTDigest
batch_pos = 0;
}
}
count = sum + l_count; // Update count, it might be different due to += inaccuracy
centroids.resize(l - centroids.begin() + 1);
if (!std::isnan(l->mean))
{
count = sum + l_count; // Update count, it might be different due to += inaccuracy
centroids.resize(l - centroids.begin() + 1);
}
else /// Skip writing last batch if (super unlikely) it's nan.
{
count = sum;
centroids.resize(l - centroids.begin());
}
// Here centroids.size() <= params.max_centroids
}
@ -200,11 +222,8 @@ public:
BetterFloat l_count = l->count;
while (r != centroids.end())
{
/// N.B. Piece of logic which compresses the same singleton centroids into one centroid is removed
/// because: 1) singleton centroids are being processed in unusual way in recent version of algorithm
/// and such compression would break this logic;
/// 2) we shall not compress centroids further than `max_centroids` parameter requires because
/// this will lead to uneven compression.
/// N.B. We cannot merge all the same values into single centroids because this will lead to
/// unbalanced compression and wrong results.
/// For more information see: https://arxiv.org/abs/1902.04023
/// The ratio of the part of the histogram to l, including the half l to the entire histogram. That is, what level quantile in position l.
@ -225,12 +244,15 @@ public:
* and at the edges decreases and is approximately equal to the distance to the edge * 4.
*/
if (l_count + r->count <= k)
if (l_count + r->count <= k && canBeMerged(l_mean, r->mean))
{
// it is possible to merge left and right
/// The left column "eats" the right.
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
if (r->mean != l_mean) /// Handling infinities of the same sign well.
{
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
}
l->mean = l_mean;
l->count = l_count;
}
@ -254,6 +276,7 @@ public:
centroids.resize(l - centroids.begin() + 1);
unmerged = 0;
}
// Ensures centroids.size() < max_centroids, independent of unprovable floating point blackbox above
compressBrute();
}
@ -298,10 +321,17 @@ public:
for (const auto & c : centroids)
{
if (c.count <= 0 || std::isnan(c.count) || std::isnan(c.mean)) // invalid count breaks compress(), invalid mean breaks sort()
if (c.count <= 0 || std::isnan(c.count)) // invalid count breaks compress()
throw Exception("Invalid centroid " + std::to_string(c.count) + ":" + std::to_string(c.mean), ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED);
count += c.count;
if (!std::isnan(c.mean))
{
count += c.count;
}
}
auto it = std::remove_if(centroids.begin(), centroids.end(), [](Centroid & c) { return std::isnan(c.mean); });
centroids.erase(it, centroids.end());
compress(); // Allows reading/writing TDigests with different epsilon/max_centroids params
}
@ -312,7 +342,7 @@ public:
ResultType getImpl(Float64 level)
{
if (centroids.empty())
return std::is_floating_point_v<ResultType> ? NAN : 0;
return std::is_floating_point_v<ResultType> ? std::numeric_limits<ResultType>::quiet_NaN() : 0;
compress();
@ -395,7 +425,6 @@ public:
while (current_x >= x)
{
if (x <= left)
result[levels_permutation[result_num]] = prev_mean;
else if (x >= right)

View File

@ -25,6 +25,34 @@ namespace
{
using SizeAndChecksum = IBackupCoordination::SizeAndChecksum;
using FileInfo = IBackupCoordination::FileInfo;
using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum;
String serializePartNamesAndChecksums(const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
WriteBufferFromOwnString out;
writeBinary(part_names_and_checksums.size(), out);
for (const auto & part_name_and_checksum : part_names_and_checksums)
{
writeBinary(part_name_and_checksum.part_name, out);
writeBinary(part_name_and_checksum.checksum, out);
}
return out.str();
}
std::vector<PartNameAndChecksum> deserializePartNamesAndChecksums(const String & str)
{
ReadBufferFromString in{str};
std::vector<PartNameAndChecksum> part_names_and_checksums;
size_t num;
readBinary(num, in);
part_names_and_checksums.resize(num);
for (size_t i = 0; i != num; ++i)
{
readBinary(part_names_and_checksums[i].part_name, in);
readBinary(part_names_and_checksums[i].checksum, in);
}
return part_names_and_checksums;
}
String serializeFileInfo(const FileInfo & info)
{
@ -92,7 +120,9 @@ namespace
}
BackupCoordinationDistributed::BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_)
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, preparing_barrier(zookeeper_path_ + "/preparing", get_zookeeper_, "BackupCoordination", "preparing")
{
createRootNodes();
}
@ -104,6 +134,8 @@ void BackupCoordinationDistributed::createRootNodes()
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_parts", "");
zookeeper->createIfNotExists(zookeeper_path + "/file_names", "");
zookeeper->createIfNotExists(zookeeper_path + "/file_infos", "");
zookeeper->createIfNotExists(zookeeper_path + "/archive_suffixes", "");
@ -115,6 +147,102 @@ void BackupCoordinationDistributed::removeAllNodes()
zookeeper->removeRecursive(zookeeper_path);
}
void BackupCoordinationDistributed::addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_data_path);
zookeeper->createIfNotExists(path, "");
}
void BackupCoordinationDistributed::addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_parts/" + escapeForFileName(table_zk_path);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(host_id);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_name.first);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_name.second);
zookeeper->create(path, serializePartNamesAndChecksums(part_names_and_checksums), zkutil::CreateMode::Persistent);
}
void BackupCoordinationDistributed::finishPreparing(const String & host_id, const String & error_message)
{
preparing_barrier.finish(host_id, error_message);
}
void BackupCoordinationDistributed::waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const
{
preparing_barrier.waitForAllHostsToFinish(host_ids, timeout);
prepareReplicatedTablesInfo();
}
void BackupCoordinationDistributed::prepareReplicatedTablesInfo() const
{
replicated_tables.emplace();
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_paths";
for (const String & escaped_table_zk_path : zookeeper->getChildren(path))
{
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
for (const String & escaped_data_path : zookeeper->getChildren(path + "/" + escaped_table_zk_path))
{
String data_path = unescapeForFileName(escaped_data_path);
replicated_tables->addDataPath(table_zk_path, data_path);
}
}
path = zookeeper_path + "/repl_tables_parts";
for (const String & escaped_table_zk_path : zookeeper->getChildren(path))
{
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
String path2 = path + "/" + escaped_table_zk_path;
for (const String & escaped_host_id : zookeeper->getChildren(path2))
{
String host_id = unescapeForFileName(escaped_host_id);
String path3 = path2 + "/" + escaped_host_id;
for (const String & escaped_database_name : zookeeper->getChildren(path3))
{
String database_name = unescapeForFileName(escaped_database_name);
String path4 = path3 + "/" + escaped_database_name;
for (const String & escaped_table_name : zookeeper->getChildren(path4))
{
String table_name = unescapeForFileName(escaped_table_name);
String path5 = path4 + "/" + escaped_table_name;
auto part_names_and_checksums = deserializePartNamesAndChecksums(zookeeper->get(path5));
replicated_tables->addPartNames(host_id, {database_name, table_name}, table_zk_path, part_names_and_checksums);
}
}
}
}
replicated_tables->preparePartNamesByLocations();
}
Strings BackupCoordinationDistributed::getReplicatedTableDataPaths(const String & table_zk_path) const
{
return replicated_tables->getDataPaths(table_zk_path);
}
Strings BackupCoordinationDistributed::getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
return replicated_tables->getPartNames(host_id, table_name, table_zk_path);
}
void BackupCoordinationDistributed::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
{
auto zookeeper = get_zookeeper();

View File

@ -1,6 +1,7 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <Common/ZooKeeper/Common.h>
#include <map>
#include <unordered_map>
@ -16,6 +17,19 @@ public:
BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_);
~BackupCoordinationDistributed() override;
void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) override;
void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
void finishPreparing(const String & host_id, const String & error_message) override;
void waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const override;
Strings getReplicatedTableDataPaths(const String & table_zk_path) const override;
Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const override;
void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
void updateFileInfo(const FileInfo & file_info) override;
@ -33,9 +47,12 @@ public:
private:
void createRootNodes();
void removeAllNodes();
void prepareReplicatedTablesInfo() const;
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
BackupCoordinationDistributedBarrier preparing_barrier;
mutable std::optional<BackupCoordinationReplicatedTablesInfo> replicated_tables;
};
}

View File

@ -0,0 +1,416 @@
#include <Backups/BackupCoordinationHelpers.h>
#include <Storages/MergeTree/MergeTreePartInfo.h>
#include <Common/Exception.h>
#include <base/chrono_io.h>
#include <boost/range/adaptor/map.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_BACKUP_TABLE;
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
extern const int LOGICAL_ERROR;
}
struct BackupCoordinationReplicatedTablesInfo::HostAndTableName
{
String host_id;
DatabaseAndTableName table_name;
struct Less
{
bool operator()(const HostAndTableName & lhs, const HostAndTableName & rhs) const
{
return (lhs.host_id < rhs.host_id) || ((lhs.host_id == rhs.host_id) && (lhs.table_name < rhs.table_name));
}
bool operator()(const std::shared_ptr<const HostAndTableName> & lhs, const std::shared_ptr<const HostAndTableName> & rhs) const
{
return operator()(*lhs, *rhs);
}
};
};
class BackupCoordinationReplicatedTablesInfo::CoveredPartsFinder
{
public:
CoveredPartsFinder() = default;
void addPart(const String & new_part_name, const std::shared_ptr<const HostAndTableName> & host_and_table_name)
{
addPart(MergeTreePartInfo::fromPartName(new_part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING), host_and_table_name);
}
void addPart(MergeTreePartInfo && new_part_info, const std::shared_ptr<const HostAndTableName> & host_and_table_name)
{
auto new_min_block = new_part_info.min_block;
auto new_max_block = new_part_info.max_block;
auto & parts = partitions[new_part_info.partition_id];
/// Find the first part with max_block >= `part_info.min_block`.
auto first_it = parts.lower_bound(new_min_block);
if (first_it == parts.end())
{
/// All max_blocks < part_info.min_block, so we can safely add the `part_info` to the list of parts.
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
return;
}
{
/// part_info.min_block <= current_info.max_block
const auto & part = first_it->second;
if (new_max_block < part.info.min_block)
{
/// (prev_info.max_block < part_info.min_block) AND (part_info.max_block < current_info.min_block),
/// so we can safely add the `part_info` to the list of parts.
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
return;
}
/// (part_info.min_block <= current_info.max_block) AND (part_info.max_block >= current_info.min_block), parts intersect.
if (part.info.contains(new_part_info))
{
/// `part_info` is already contained in another part.
return;
}
}
/// Probably `part_info` is going to replace multiple parts, find the range of parts to replace.
auto last_it = first_it;
while (last_it != parts.end())
{
const auto & part = last_it->second;
if (part.info.min_block > new_max_block)
break;
if (!new_part_info.contains(part.info))
{
throw Exception(
ErrorCodes::CANNOT_BACKUP_TABLE,
"Intersected parts detected: {} in the table {}.{}{} and {} in the table {}.{}{}. It should be investigated",
part.info.getPartName(),
part.host_and_table_name->table_name.first,
part.host_and_table_name->table_name.second,
part.host_and_table_name->host_id.empty() ? "" : (" on the host " + part.host_and_table_name->host_id),
new_part_info.getPartName(),
host_and_table_name->table_name.first,
host_and_table_name->table_name.second,
host_and_table_name->host_id.empty() ? "" : (" on the host " + host_and_table_name->host_id));
}
++last_it;
}
/// `part_info` will replace multiple parts [first_it..last_it)
parts.erase(first_it, last_it);
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
}
bool isCoveredByAnotherPart(const String & part_name) const
{
return isCoveredByAnotherPart(MergeTreePartInfo::fromPartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING));
}
bool isCoveredByAnotherPart(const MergeTreePartInfo & part_info) const
{
auto partition_it = partitions.find(part_info.partition_id);
if (partition_it == partitions.end())
return false;
const auto & parts = partition_it->second;
/// Find the first part with max_block >= `part_info.min_block`.
auto it_part = parts.lower_bound(part_info.min_block);
if (it_part == parts.end())
{
/// All max_blocks < part_info.min_block, so there is no parts covering `part_info`.
return false;
}
/// part_info.min_block <= current_info.max_block
const auto & existing_part = it_part->second;
if (part_info.max_block < existing_part.info.min_block)
{
/// (prev_info.max_block < part_info.min_block) AND (part_info.max_block < current_info.min_block),
/// so there is no parts covering `part_info`.
return false;
}
/// (part_info.min_block <= current_info.max_block) AND (part_info.max_block >= current_info.min_block), parts intersect.
if (existing_part.info == part_info)
{
/// It's the same part; it kind of covers itself, but in this function we check whether a part is covered by another part.
return false;
}
/// Check if `part_info` is covered by `current_info`.
return existing_part.info.contains(part_info);
}
private:
struct PartInfo
{
MergeTreePartInfo info;
std::shared_ptr<const HostAndTableName> host_and_table_name;
};
using Parts = std::map<Int64 /* max_block */, PartInfo>;
std::unordered_map<String, Parts> partitions;
};
void BackupCoordinationReplicatedTablesInfo::addDataPath(const String & table_zk_path, const String & table_data_path)
{
tables[table_zk_path].data_paths.push_back(table_data_path);
}
Strings BackupCoordinationReplicatedTablesInfo::getDataPaths(const String & table_zk_path) const
{
auto it = tables.find(table_zk_path);
if (it == tables.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "getDataPaths() called for unknown table_zk_path: {}", table_zk_path);
const auto & replicated_table = it->second;
return replicated_table.data_paths;
}
void BackupCoordinationReplicatedTablesInfo::addPartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
auto & table = tables[table_zk_path];
auto & part_locations_by_names = table.part_locations_by_names;
auto host_and_table_name = std::make_shared<HostAndTableName>();
host_and_table_name->host_id = host_id;
host_and_table_name->table_name = table_name;
for (const auto & part_name_and_checksum : part_names_and_checksums)
{
const auto & part_name = part_name_and_checksum.part_name;
const auto & checksum = part_name_and_checksum.checksum;
auto it = part_locations_by_names.find(part_name);
if (it == part_locations_by_names.end())
{
it = part_locations_by_names.emplace(part_name, PartLocations{}).first;
it->second.checksum = checksum;
}
else
{
const auto & existing = it->second;
if (existing.checksum != checksum)
{
const auto & existing_host_and_table_name = **existing.host_and_table_names.begin();
throw Exception(
ErrorCodes::CANNOT_BACKUP_TABLE,
"Table {}.{} has part {} which is different from the part of table {}.{}. Must be the same",
table_name.first,
table_name.second,
part_name,
existing_host_and_table_name.table_name.first,
existing_host_and_table_name.table_name.second);
}
}
auto & host_and_table_names = it->second.host_and_table_names;
/// `host_and_table_names` should be ordered because we need this vector to be in the same order on every replica.
host_and_table_names.insert(
std::upper_bound(host_and_table_names.begin(), host_and_table_names.end(), host_and_table_name, HostAndTableName::Less{}),
host_and_table_name);
}
}
Strings BackupCoordinationReplicatedTablesInfo::getPartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
if (!part_names_by_locations_prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "preparePartNamesByLocations() was not called before getPartNames()");
auto it = tables.find(table_zk_path);
if (it == tables.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "getPartNames() called for unknown table_zk_path: {}", table_zk_path);
const auto & table = it->second;
auto it2 = table.part_names_by_locations.find(host_id);
if (it2 == table.part_names_by_locations.end())
return {};
const auto & part_names_by_host_id = it2->second;
auto it3 = part_names_by_host_id.find(table_name);
if (it3 == part_names_by_host_id.end())
return {};
return it3->second;
}
void BackupCoordinationReplicatedTablesInfo::preparePartNamesByLocations()
{
if (part_names_by_locations_prepared)
return;
part_names_by_locations_prepared = true;
size_t counter = 0;
for (auto & table : tables | boost::adaptors::map_values)
{
CoveredPartsFinder covered_parts_finder;
for (const auto & [part_name, part_locations] : table.part_locations_by_names)
covered_parts_finder.addPart(part_name, *part_locations.host_and_table_names.begin());
table.part_names_by_locations.clear();
for (const auto & [part_name, part_locations] : table.part_locations_by_names)
{
if (covered_parts_finder.isCoveredByAnotherPart(part_name))
continue;
size_t chosen_index = (counter++) % part_locations.host_and_table_names.size();
const auto & chosen_host_id = part_locations.host_and_table_names[chosen_index]->host_id;
const auto & chosen_table_name = part_locations.host_and_table_names[chosen_index]->table_name;
table.part_names_by_locations[chosen_host_id][chosen_table_name].push_back(part_name);
}
}
}
BackupCoordinationDistributedBarrier::BackupCoordinationDistributedBarrier(
const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, const String & logger_name_, const String & operation_name_)
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, log(&Poco::Logger::get(logger_name_))
, operation_name(operation_name_)
{
createRootNodes();
}
void BackupCoordinationDistributedBarrier::createRootNodes()
{
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
}
void BackupCoordinationDistributedBarrier::finish(const String & host_id, const String & error_message)
{
if (error_message.empty())
LOG_TRACE(log, "Host {} has finished {}", host_id, operation_name);
else
LOG_ERROR(log, "Host {} has failed {} with message: {}", host_id, operation_name, error_message);
auto zookeeper = get_zookeeper();
if (error_message.empty())
zookeeper->create(zookeeper_path + "/" + host_id + ":ready", "", zkutil::CreateMode::Persistent);
else
zookeeper->create(zookeeper_path + "/" + host_id + ":error", error_message, zkutil::CreateMode::Persistent);
}
void BackupCoordinationDistributedBarrier::waitForAllHostsToFinish(const Strings & host_ids, const std::chrono::seconds timeout) const
{
auto zookeeper = get_zookeeper();
bool all_hosts_ready = false;
String not_ready_host_id;
String error_host_id;
String error_message;
/// Checks whether everything's ready, or whether we need to wait more; results are reported via the captured variables.
auto process_nodes = [&](const Strings & nodes)
{
std::unordered_set<std::string_view> set{nodes.begin(), nodes.end()};
for (const String & host_id : host_ids)
{
if (set.contains(host_id + ":error"))
{
error_host_id = host_id;
error_message = zookeeper->get(zookeeper_path + "/" + host_id + ":error");
return;
}
if (!set.contains(host_id + ":ready"))
{
LOG_TRACE(log, "Waiting for host {} {}", host_id, operation_name);
not_ready_host_id = host_id;
return;
}
}
all_hosts_ready = true;
};
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getChildrenWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout.count() >= 0);
std::chrono::steady_clock::duration time_left = timeout;
std::mutex dummy_mutex;
while (true)
{
if (use_timeout && (time_left.count() <= 0))
{
Strings children = zookeeper->getChildren(zookeeper_path);
process_nodes(children);
break;
}
watch_set = true;
Strings children = zookeeper->getChildrenWatch(zookeeper_path, nullptr, watch_callback);
process_nodes(children);
if (!error_message.empty() || all_hosts_ready)
break;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
if (!watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered))
break;
time_left -= (std::chrono::steady_clock::now() - start_time);
}
else
watch_triggered_event.wait(dummy_lock, watch_triggered);
}
}
if (watch_set)
{
/// Remove watch by triggering it.
zookeeper->create(zookeeper_path + "/remove_watch-", "", zkutil::CreateMode::EphemeralSequential);
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout, watch_triggered);
}
if (!error_message.empty())
{
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} failed {} with message: {}",
error_host_id,
operation_name,
error_message);
}
if (all_hosts_ready)
{
LOG_TRACE(log, "All hosts have finished {}", operation_name);
return;
}
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} has failed {}: Time ({}) is out",
not_ready_host_id,
operation_name,
to_string(timeout));
}
}

View File

@ -0,0 +1,90 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Common/ZooKeeper/Common.h>
#include <map>
#include <unordered_map>
namespace DB
{
/// Helper designed to be used in an implementation of the IBackupCoordination interface in the part related to replicated tables.
class BackupCoordinationReplicatedTablesInfo
{
public:
BackupCoordinationReplicatedTablesInfo() = default;
/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function and then all the added paths can be returned by call of the function
/// getReplicatedTableDataPaths().
void addDataPath(const String & table_zk_path, const String & table_data_path);
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedTableDataPath()).
Strings getDataPaths(const String & table_zk_path) const;
using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum;
/// Adds part names which a specified replica of a replicated table is going to put to the backup.
/// Multiple replicas of the replicated table call this function and then the added part names can be returned by call of the function
/// getReplicatedTablePartNames().
/// Checksums are used only to control that parts under the same names on different replicas are the same.
void addPartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums);
void preparePartNamesByLocations();
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as it was added by call of the function addReplicatedTablePartNames() but without duplications and without
/// parts covered by another parts.
Strings getPartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const;
private:
class CoveredPartsFinder;
struct HostAndTableName;
struct PartLocations
{
std::vector<std::shared_ptr<const HostAndTableName>> host_and_table_names;
UInt128 checksum;
};
struct TableInfo
{
Strings data_paths;
std::map<String /* part_name */, PartLocations> part_locations_by_names; /// Should be ordered because we need this map to be in the same order on every replica.
std::unordered_map<String /* host_id */, std::map<DatabaseAndTableName, Strings /* part_names */>> part_names_by_locations;
};
std::unordered_map<String /* zk_path */, TableInfo> tables;
bool part_names_by_locations_prepared = false;
};
/// Helper designed to be used in the implementation of the BackupCoordinationDistributed and RestoreCoordinationDistributed classes
/// to implement synchronization when we need all hosts to finish a specific task and then continue.
class BackupCoordinationDistributedBarrier
{
public:
BackupCoordinationDistributedBarrier(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, const String & logger_name_, const String & operation_name_);
/// Sets that a specified host has finished the specific task, successfully or with an error.
/// In the latter case `error_message` should be set.
void finish(const String & host_id, const String & error_message = {});
/// Waits for a specified list of hosts to finish the specific task.
void waitForAllHostsToFinish(const Strings & host_ids, const std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const;
private:
void createRootNodes();
String zookeeper_path;
zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
String operation_name;
};
}

View File

@ -1,15 +1,59 @@
#include <Backups/BackupCoordinationLocal.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <fmt/format.h>
namespace DB
{
using SizeAndChecksum = IBackupCoordination::SizeAndChecksum;
using FileInfo = IBackupCoordination::FileInfo;
BackupCoordinationLocal::BackupCoordinationLocal() = default;
BackupCoordinationLocal::BackupCoordinationLocal() : log(&Poco::Logger::get("BackupCoordination"))
{
}
BackupCoordinationLocal::~BackupCoordinationLocal() = default;
void BackupCoordinationLocal::addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path)
{
std::lock_guard lock{mutex};
replicated_tables.addDataPath(table_zk_path, table_data_path);
}
void BackupCoordinationLocal::addReplicatedTablePartNames(const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
std::lock_guard lock{mutex};
replicated_tables.addPartNames("", table_name, table_zk_path, part_names_and_checksums);
}
void BackupCoordinationLocal::finishPreparing(const String & /* host_id */, const String & error_message)
{
LOG_TRACE(log, "Finished preparing{}", (error_message.empty() ? "" : (" with error " + error_message)));
if (!error_message.empty())
return;
replicated_tables.preparePartNamesByLocations();
}
void BackupCoordinationLocal::waitForAllHostsPrepared(const Strings & /* host_ids */, std::chrono::seconds /* timeout */) const
{
}
Strings BackupCoordinationLocal::getReplicatedTableDataPaths(const String & table_zk_path) const
{
std::lock_guard lock{mutex};
return replicated_tables.getDataPaths(table_zk_path);
}
Strings BackupCoordinationLocal::getReplicatedTablePartNames(const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
std::lock_guard lock{mutex};
return replicated_tables.getPartNames("", table_name, table_zk_path);
}
void BackupCoordinationLocal::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
{
std::lock_guard lock{mutex};

View File

@ -1,10 +1,13 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <map>
#include <mutex>
namespace Poco { class Logger; }
namespace DB
{
@ -15,6 +18,19 @@ public:
BackupCoordinationLocal();
~BackupCoordinationLocal() override;
void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) override;
void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
void finishPreparing(const String & host_id, const String & error_message) override;
void waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const override;
Strings getReplicatedTableDataPaths(const String & table_zk_path) const override;
Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const override;
void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
void updateFileInfo(const FileInfo & file_info) override;
@ -30,10 +46,14 @@ public:
private:
mutable std::mutex mutex;
BackupCoordinationReplicatedTablesInfo replicated_tables;
std::map<String /* file_name */, SizeAndChecksum> file_names; /// Should be ordered alphabetically, see listFiles(). For empty files we assume checksum = 0.
std::map<SizeAndChecksum, FileInfo> file_infos; /// Information about files. Without empty files.
Strings archive_suffixes;
size_t current_archive_suffix = 0;
const Poco::Logger * log;
};
}

View File

@ -12,6 +12,7 @@
namespace DB
{
class IBackupCoordination;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
@ -32,7 +33,7 @@ public:
String password;
ContextPtr context;
bool is_internal_backup = false;
String coordination_zk_path;
std::shared_ptr<IBackupCoordination> backup_coordination;
};
static BackupFactory & instance();

View File

@ -125,11 +125,10 @@ BackupImpl::BackupImpl(
, reader(std::move(reader_))
, is_internal_backup(false)
, coordination(std::make_shared<BackupCoordinationLocal>())
, context(context_)
, version(INITIAL_BACKUP_VERSION)
, base_backup_info(base_backup_info_)
{
open();
open(context_);
}
@ -141,24 +140,20 @@ BackupImpl::BackupImpl(
const ContextPtr & context_,
const std::optional<UUID> & backup_uuid_,
bool is_internal_backup_,
const String & coordination_zk_path_)
const std::shared_ptr<IBackupCoordination> & coordination_)
: backup_name(backup_name_)
, archive_params(archive_params_)
, use_archives(!archive_params.archive_name.empty())
, open_mode(OpenMode::WRITE)
, writer(std::move(writer_))
, is_internal_backup(is_internal_backup_)
, context(context_)
, coordination(coordination_ ? coordination_ : std::make_shared<BackupCoordinationLocal>())
, uuid(backup_uuid_)
, version(CURRENT_BACKUP_VERSION)
, base_backup_info(base_backup_info_)
, log(&Poco::Logger::get("Backup"))
{
if (coordination_zk_path_.empty())
coordination = std::make_shared<BackupCoordinationLocal>();
else
coordination = std::make_shared<BackupCoordinationDistributed>(coordination_zk_path_, [&] { return context->getZooKeeper(); });
open();
open(context_);
}
@ -168,7 +163,7 @@ BackupImpl::~BackupImpl()
}
void BackupImpl::open()
void BackupImpl::open(const ContextPtr & context)
{
std::lock_guard lock{mutex};
@ -224,17 +219,21 @@ void BackupImpl::close()
std::lock_guard lock{mutex};
if (!is_internal_backup && writing_finalized)
{
LOG_TRACE(log, "Finalizing backup {}", backup_name);
writeBackupMetadata();
LOG_INFO(log, "Finalized backup {}", backup_name);
}
archive_readers.clear();
for (auto & archive_writer : archive_writers)
archive_writer = {"", nullptr};
if (!is_internal_backup && writer && !writing_finalized)
{
LOG_INFO(log, "Removing all files of backup {} after failure", backup_name);
removeAllFilesAfterFailure();
if (!is_internal_backup)
coordination->drop();
}
}
time_t BackupImpl::getTimestamp() const

View File

@ -49,7 +49,7 @@ public:
const ContextPtr & context_,
const std::optional<UUID> & backup_uuid_ = {},
bool is_internal_backup_ = false,
const String & coordination_zk_path_ = {});
const std::shared_ptr<IBackupCoordination> & coordination_ = {});
~BackupImpl() override;
@ -73,7 +73,7 @@ private:
using FileInfo = IBackupCoordination::FileInfo;
class BackupEntryFromBackupImpl;
void open();
void open(const ContextPtr & context);
void close();
void writeBackupMetadata();
void readBackupMetadata();
@ -90,7 +90,6 @@ private:
std::shared_ptr<IBackupReader> reader;
const bool is_internal_backup;
std::shared_ptr<IBackupCoordination> coordination;
ContextPtr context;
mutable std::mutex mutex;
std::optional<UUID> uuid;
@ -103,6 +102,7 @@ private:
std::pair<String, std::shared_ptr<IArchiveWriter>> archive_writers[2];
String current_archive_suffix;
bool writing_finalized = false;
const Poco::Logger * log;
};
}

View File

@ -24,7 +24,6 @@ namespace ErrorCodes
M(Bool, async) \
M(UInt64, shard_num) \
M(UInt64, replica_num) \
M(Bool, allow_storing_multiple_replicas) \
M(Bool, internal) \
M(String, host_id) \
M(String, coordination_zk_path)

View File

@ -11,7 +11,8 @@ class ASTBackupQuery;
/// Settings specified in the "SETTINGS" clause of a BACKUP query.
struct BackupSettings
{
/// Base backup, if it's set an incremental backup will be built.
/// Base backup, if it's set an incremental backup will be built. That means only differences made after the base backup will be put
/// into a new backup.
std::optional<BackupInfo> base_backup_info;
/// Compression method and level for writing the backup (when applicable).
@ -36,9 +37,6 @@ struct BackupSettings
/// Can only be used with BACKUP ON CLUSTER.
size_t replica_num = 0;
/// Allows storing in the backup of multiple replicas.
bool allow_storing_multiple_replicas = false;
/// Internal, should not be specified by user.
/// Whether this backup is a part of a distributed backup created by BACKUP ON CLUSTER.
bool internal = false;

View File

@ -4,16 +4,18 @@
#include <Backups/DDLCompareUtils.h>
#include <Backups/DDLRenamingVisitor.h>
#include <Backups/IBackup.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/formatTableNameOrTemporaryTableName.h>
#include <Backups/replaceTableUUIDWithMacroInReplicatedTableDef.h>
#include <Common/escapeForFileName.h>
#include <Access/Common/AccessFlags.h>
#include <Access/Common/AccessRightsElement.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <Storages/StorageReplicatedMergeTree.h>
namespace DB
@ -98,13 +100,57 @@ namespace
class BackupEntriesBuilder
{
public:
BackupEntriesBuilder(const ContextPtr & context_, const BackupSettings & backup_settings_)
: context(context_), backup_settings(backup_settings_)
BackupEntriesBuilder(const ContextPtr & context_, const BackupSettings & backup_settings_, std::shared_ptr<IBackupCoordination> backup_coordination_)
: context(context_), backup_settings(backup_settings_), backup_coordination(backup_coordination_)
{
}
/// Prepares internal structures for making backup entries.
void prepare(const ASTBackupQuery::Elements & elements)
void prepare(const ASTBackupQuery::Elements & elements, std::chrono::seconds timeout_for_other_nodes_to_prepare)
{
try
{
prepareImpl(elements);
}
catch (...)
{
backup_coordination->finishPreparing(backup_settings.host_id, getCurrentExceptionMessage(false));
throw;
}
/// We've finished preparing the backup entries, now we will wait for other replicas and shards to finish too.
/// We need this waiting because we're going to call some functions which require data collected from other nodes too,
/// see IBackupCoordination::getReplicatedTableDataPaths() and IBackupCoordination::getReplicatedTablePartNames().
backup_coordination->finishPreparing(backup_settings.host_id);
backup_coordination->waitForAllHostsPrepared(
BackupSettings::Util::filterHostIDs(
backup_settings.cluster_host_ids, backup_settings.shard_num, backup_settings.replica_num),
timeout_for_other_nodes_to_prepare);
}
/// Makes backup entries, should be called after prepare().
BackupEntries makeBackupEntries() const
{
BackupEntries res;
for (const auto & info : databases | boost::adaptors::map_values)
res.push_back(makeBackupEntryForMetadata(*info.create_query));
for (const auto & info : tables | boost::adaptors::map_values)
{
res.push_back(makeBackupEntryForMetadata(*info.create_query));
appendBackupEntriesForData(res, info);
}
/// A backup cannot be empty.
if (res.empty())
throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY);
return res;
}
private:
void prepareImpl(const ASTBackupQuery::Elements & elements)
{
calculateShardNumAndReplicaNumInBackup();
renaming_settings.setFromBackupQuery(elements);
@ -135,36 +181,6 @@ namespace
}
}
/// Makes backup entries, should be called after prepare().
BackupEntries makeBackupEntries() const
{
BackupEntries res;
for (const auto & info : databases | boost::adaptors::map_values)
res.push_back(makeBackupEntryForMetadata(*info.create_query));
for (const auto & info : tables | boost::adaptors::map_values)
{
res.push_back(makeBackupEntryForMetadata(*info.create_query));
if (info.has_data)
{
auto data_backup = info.storage->backupData(context, info.partitions);
if (!data_backup.empty())
{
String data_path = PathsInBackup::getDataPath(*info.create_query, shard_num_in_backup, replica_num_in_backup);
for (auto & [path_in_backup, backup_entry] : data_backup)
res.emplace_back(data_path + path_in_backup, std::move(backup_entry));
}
}
}
/// A backup cannot be empty.
if (res.empty())
throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY);
return res;
}
private:
void calculateShardNumAndReplicaNumInBackup()
{
size_t shard_num = 0;
@ -187,8 +203,6 @@ namespace
void prepareToBackupTable(const DatabaseAndTableName & table_name_, const DatabaseAndTable & table_, const ASTs & partitions_)
{
context->checkAccess(AccessType::SHOW_TABLES, table_name_.first, table_name_.second);
const auto & database = table_.first;
const auto & storage = table_.second;
@ -206,22 +220,72 @@ namespace
/// Make a create query for this table.
auto create_query = prepareCreateQueryForBackup(database->getCreateTableQuery(table_name_.second, context));
String data_path = PathsInBackup::getDataPath(*create_query, shard_num_in_backup, replica_num_in_backup);
bool has_data = storage->hasDataToBackup() && !backup_settings.structure_only;
if (has_data)
{
/// We check for SELECT privilege only if we're going to read data from the table.
context->checkAccess(AccessType::SELECT, table_name_.first, table_name_.second);
}
String zk_path;
BackupEntries data = prepareToBackupTableData(table_name_, storage, partitions_, data_path, zk_path);
CreateTableInfo info;
TableInfo info;
info.table_name = table_name_;
info.create_query = create_query;
info.storage = storage;
info.partitions = partitions_;
info.has_data = has_data;
info.data = std::move(data);
info.data_path = std::move(data_path);
info.zk_path = std::move(zk_path);
tables[name_in_backup] = std::move(info);
}
BackupEntries prepareToBackupTableData(const DatabaseAndTableName & table_name_, const StoragePtr & storage_, const ASTs & partitions_, const String & data_path, String & zk_path)
{
zk_path.clear();
const StorageReplicatedMergeTree * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage_.get());
bool has_data = (storage_->hasDataToBackup() || replicated_table) && !backup_settings.structure_only;
if (!has_data)
return {};
BackupEntries data = storage_->backupData(context, partitions_);
if (!replicated_table)
return data;
zk_path = replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath();
backup_coordination->addReplicatedTableDataPath(zk_path, data_path);
std::unordered_map<String, SipHash> parts;
for (const auto & [relative_path, backup_entry] : data)
{
size_t slash_pos = relative_path.find('/');
if (slash_pos != String::npos)
{
String part_name = relative_path.substr(0, slash_pos);
if (MergeTreePartInfo::tryParsePartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING))
{
auto & hash = parts[part_name];
if (relative_path.ends_with(".bin"))
{
auto checksum = backup_entry->getChecksum();
hash.update(relative_path);
hash.update(backup_entry->getSize());
hash.update(*checksum);
}
}
}
}
std::vector<IBackupCoordination::PartNameAndChecksum> part_names_and_checksums;
part_names_and_checksums.reserve(parts.size());
for (auto & [part_name, hash] : parts)
{
UInt128 checksum;
hash.get128(checksum);
auto & part_name_and_checksum = part_names_and_checksums.emplace_back();
part_name_and_checksum.part_name = part_name;
part_name_and_checksum.checksum = checksum;
}
backup_coordination->addReplicatedTablePartNames(backup_settings.host_id, table_name_, zk_path, part_names_and_checksums);
return data;
}
/// Prepares to restore a database and all tables in it.
void prepareToBackupDatabase(const String & database_name_, const std::set<String> & except_list_)
{
@ -231,8 +295,6 @@ namespace
void prepareToBackupDatabase(const String & database_name_, const DatabasePtr & database_, const std::set<String> & except_list_)
{
context->checkAccess(AccessType::SHOW_DATABASES, database_name_);
/// Check that we are not trying to restore the same database again.
String name_in_backup = renaming_settings.getNewDatabaseName(database_name_);
if (databases.contains(name_in_backup))
@ -244,7 +306,7 @@ namespace
/// Make a create query for this database.
auto create_query = prepareCreateQueryForBackup(database_->getCreateDatabaseQuery());
CreateDatabaseInfo info;
DatabaseInfo info;
info.create_query = create_query;
databases[name_in_backup] = std::move(info);
}
@ -298,36 +360,77 @@ namespace
return {metadata_path, std::move(metadata_entry)};
}
/// Information which is used to make an instance of RestoreTableFromBackupTask.
struct CreateTableInfo
struct TableInfo;
void appendBackupEntriesForData(BackupEntries & res, const TableInfo & info) const
{
if (info.zk_path.empty())
{
for (auto & [relative_path, backup_entry] : info.data)
res.emplace_back(info.data_path + relative_path, backup_entry);
return;
}
Strings data_paths = backup_coordination->getReplicatedTableDataPaths(info.zk_path);
Strings part_names = backup_coordination->getReplicatedTablePartNames(backup_settings.host_id, info.table_name, info.zk_path);
std::unordered_set<std::string_view> part_names_set{part_names.begin(), part_names.end()};
for (auto & [relative_path, backup_entry] : info.data)
{
size_t slash_pos = relative_path.find('/');
if (slash_pos != String::npos)
{
String part_name = relative_path.substr(0, slash_pos);
if (MergeTreePartInfo::tryParsePartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING))
{
if (!part_names_set.contains(part_name))
continue;
for (const auto & data_path : data_paths)
res.emplace_back(data_path + relative_path, backup_entry);
continue;
}
}
res.emplace_back(info.data_path + relative_path, backup_entry);
}
}
/// Information about a table which is going to be put to the backup.
struct TableInfo
{
DatabaseAndTableName table_name;
ASTPtr create_query;
StoragePtr storage;
ASTs partitions;
bool has_data = false;
BackupEntries data;
String data_path;
String zk_path;
};
/// Information about a database which is going to be put to the backup.
struct CreateDatabaseInfo
struct DatabaseInfo
{
ASTPtr create_query;
};
ContextPtr context;
BackupSettings backup_settings;
std::shared_ptr<IBackupCoordination> backup_coordination;
size_t shard_num_in_backup = 0;
size_t replica_num_in_backup = 0;
DDLRenamingSettings renaming_settings;
std::unordered_map<String /* db_name_in_backup */, CreateDatabaseInfo> databases;
std::map<DatabaseAndTableName /* table_name_in_backup */, CreateTableInfo> tables;
std::unordered_map<String /* db_name_in_backup */, DatabaseInfo> databases;
std::map<DatabaseAndTableName /* table_name_in_backup */, TableInfo> tables;
};
}
BackupEntries makeBackupEntries(const ContextPtr & context, const Elements & elements, const BackupSettings & backup_settings)
BackupEntries makeBackupEntries(
const ContextPtr & context,
const Elements & elements,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
std::chrono::seconds timeout_for_other_nodes_to_prepare)
{
BackupEntriesBuilder builder{context, backup_settings};
builder.prepare(elements);
BackupEntriesBuilder builder{context, backup_settings, backup_coordination};
builder.prepare(elements, timeout_for_other_nodes_to_prepare);
return builder.makeBackupEntries();
}
@ -400,4 +503,48 @@ void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries
backup->finalizeWriting();
}
/// Returns access required to execute BACKUP query.
AccessRightsElements getRequiredAccessToBackup(const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings)
{
AccessRightsElements required_access;
for (const auto & element : elements)
{
switch (element.type)
{
case ASTBackupQuery::TABLE:
{
if (element.is_temp_db)
break;
AccessFlags flags = AccessType::SHOW_TABLES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags, element.name.first, element.name.second);
break;
}
case ASTBackupQuery::DATABASE:
{
if (element.is_temp_db)
break;
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags, element.name.first);
/// TODO: It's better to process `element.except_list` somehow.
break;
}
case ASTBackupQuery::ALL_DATABASES:
{
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags);
/// TODO: It's better to process `element.except_list` somehow.
break;
}
}
}
return required_access;
}
}

View File

@ -10,16 +10,26 @@ class IBackup;
using BackupPtr = std::shared_ptr<const IBackup>;
using BackupMutablePtr = std::shared_ptr<IBackup>;
class IBackupEntry;
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
using BackupEntries = std::vector<std::pair<String, BackupEntryPtr>>;
struct BackupSettings;
class IBackupCoordination;
class AccessRightsElements;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
/// Prepares backup entries.
BackupEntries makeBackupEntries(const ContextPtr & context, const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings);
BackupEntries makeBackupEntries(
const ContextPtr & context,
const ASTBackupQuery::Elements & elements,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
std::chrono::seconds timeout_for_other_nodes_to_prepare = std::chrono::seconds::zero());
/// Write backup entries to an opened backup.
void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, ThreadPool & thread_pool);
/// Returns access required to execute BACKUP query.
AccessRightsElements getRequiredAccessToBackup(const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings);
}

View File

@ -4,6 +4,8 @@
#include <Backups/BackupSettings.h>
#include <Backups/BackupUtils.h>
#include <Backups/IBackupEntry.h>
#include <Backups/BackupCoordinationDistributed.h>
#include <Backups/BackupCoordinationLocal.h>
#include <Backups/IRestoreTask.h>
#include <Backups/RestoreCoordinationDistributed.h>
#include <Backups/RestoreCoordinationLocal.h>
@ -21,160 +23,18 @@
namespace DB
{
namespace ErrorCodes
{
extern const int QUERY_IS_PROHIBITED;
extern const int LOGICAL_ERROR;
}
namespace
{
void checkNoMultipleReplicas(const std::vector<Strings> & cluster_host_ids, size_t only_shard_num)
{
if (only_shard_num)
{
if ((only_shard_num <= cluster_host_ids.size()) && (cluster_host_ids[only_shard_num - 1].size() > 1))
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Backup of multiple replicas is disabled. Choose one replica with the replica_num setting or specify allow_storing_multiple_replicas=true");
}
for (const auto & shard : cluster_host_ids)
{
if (shard.size() > 1)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Backup of multiple replicas is disabled. Choose one replica with the replica_num setting or specify allow_storing_multiple_replicas=true");
}
}
void executeBackupImpl(const ASTBackupQuery & query, const UUID & backup_uuid, const ContextPtr & context, ThreadPool & thread_pool)
{
const auto backup_info = BackupInfo::fromAST(*query.backup_name);
const auto backup_settings = BackupSettings::fromBackupQuery(query);
std::shared_ptr<ASTBackupQuery> new_query = std::static_pointer_cast<ASTBackupQuery>(query.clone());
BackupFactory::CreateParams backup_create_params;
backup_create_params.open_mode = IBackup::OpenMode::WRITE;
backup_create_params.context = context;
backup_create_params.backup_info = backup_info;
backup_create_params.base_backup_info = backup_settings.base_backup_info;
backup_create_params.compression_method = backup_settings.compression_method;
backup_create_params.compression_level = backup_settings.compression_level;
backup_create_params.password = backup_settings.password;
backup_create_params.backup_uuid = backup_uuid;
backup_create_params.is_internal_backup = backup_settings.internal;
backup_create_params.coordination_zk_path = backup_settings.coordination_zk_path;
ClusterPtr cluster;
if (!query.cluster.empty())
{
new_query->cluster = context->getMacros()->expand(query.cluster);
cluster = context->getCluster(new_query->cluster);
auto new_backup_settings = backup_settings;
new_backup_settings.cluster_host_ids = cluster->getHostIDs();
if (!backup_settings.allow_storing_multiple_replicas && !backup_settings.replica_num)
checkNoMultipleReplicas(new_backup_settings.cluster_host_ids, backup_settings.shard_num);
if (backup_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
new_backup_settings.coordination_zk_path
= query.cluster.empty() ? "" : (root_zk_path + "/backup-" + toString(backup_uuid));
backup_create_params.coordination_zk_path = new_backup_settings.coordination_zk_path;
}
new_backup_settings.copySettingsToQuery(*new_query);
}
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
if (!query.cluster.empty())
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = backup_settings.shard_num;
params.only_replica_num = backup_settings.replica_num;
auto res = executeDDLQueryOnCluster(new_query, context, params);
PullingPipelineExecutor executor(res.pipeline);
Block block;
while (executor.pull(block));
backup->finalizeWriting();
}
else
{
new_query->setDatabase(context->getCurrentDatabase());
auto backup_entries = makeBackupEntries(context, new_query->elements, backup_settings);
writeBackupEntries(backup, std::move(backup_entries), thread_pool);
}
}
void executeRestoreImpl(const ASTBackupQuery & query, const UUID & restore_uuid, ContextMutablePtr context, ThreadPool & thread_pool)
{
const auto backup_info = BackupInfo::fromAST(*query.backup_name);
const auto restore_settings = RestoreSettings::fromRestoreQuery(query);
bool is_internal_restore = restore_settings.internal;
std::shared_ptr<IRestoreCoordination> restore_coordination;
SCOPE_EXIT({
if (!is_internal_restore && restore_coordination)
restore_coordination->drop();
});
std::shared_ptr<ASTBackupQuery> new_query = std::static_pointer_cast<ASTBackupQuery>(query.clone());
ClusterPtr cluster;
if (!query.cluster.empty())
{
new_query->cluster = context->getMacros()->expand(query.cluster);
cluster = context->getCluster(new_query->cluster);
auto new_restore_settings = restore_settings;
new_restore_settings.cluster_host_ids = cluster->getHostIDs();
if (new_restore_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
new_restore_settings.coordination_zk_path
= query.cluster.empty() ? "" : (root_zk_path + "/restore-" + toString(restore_uuid));
}
new_restore_settings.copySettingsToQuery(*new_query);
}
if (!restore_settings.coordination_zk_path.empty())
restore_coordination = std::make_shared<RestoreCoordinationDistributed>(restore_settings.coordination_zk_path, [context=context] { return context->getZooKeeper(); });
else
restore_coordination = std::make_shared<RestoreCoordinationLocal>();
if (!query.cluster.empty())
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = restore_settings.shard_num;
params.only_replica_num = restore_settings.replica_num;
auto res = executeDDLQueryOnCluster(new_query, context, params);
PullingPipelineExecutor executor(res.pipeline);
Block block;
while (executor.pull(block));
}
else
{
new_query->setDatabase(context->getCurrentDatabase());
BackupFactory::CreateParams backup_open_params;
backup_open_params.open_mode = IBackup::OpenMode::READ;
backup_open_params.context = context;
backup_open_params.backup_info = backup_info;
backup_open_params.base_backup_info = restore_settings.base_backup_info;
backup_open_params.password = restore_settings.password;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
auto timeout_for_restoring_metadata = std::chrono::seconds{context->getConfigRef().getUInt("backups.restore_metadata_timeout", 0)};
auto restore_tasks = makeRestoreTasks(context, backup, new_query->elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
executeRestoreTasks(std::move(restore_tasks), thread_pool, restore_settings, restore_coordination, timeout_for_restoring_metadata);
}
}
}
BackupsWorker::BackupsWorker(size_t num_backup_threads, size_t num_restore_threads)
: backups_thread_pool(num_backup_threads)
, restores_thread_pool(num_restore_threads)
: backups_thread_pool(num_backup_threads, /* max_free_threads = */ 0, num_backup_threads)
, restores_thread_pool(num_restore_threads, /* max_free_threads = */ 0, num_restore_threads)
, log(&Poco::Logger::get("BackupsWorker"))
{
/// We set max_free_threads = 0 because we don't want to keep any threads if there is no BACKUP or RESTORE query running right now.
}
UUID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context)
@ -186,129 +46,320 @@ UUID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutable
return startRestoring(backup_or_restore_query, context);
}
UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & context)
{
UUID uuid = UUIDHelpers::generateV4();
UUID backup_uuid = UUIDHelpers::generateV4();
auto backup_query = std::static_pointer_cast<ASTBackupQuery>(query->clone());
auto backup_info = BackupInfo::fromAST(*backup_query->backup_name);
auto backup_settings = BackupSettings::fromBackupQuery(*backup_query);
BackupInfo backup_info;
BackupSettings backup_settings;
addInfo(backup_uuid, backup_info.toString(), BackupStatus::MAKING_BACKUP, backup_settings.internal);
std::shared_ptr<IBackupCoordination> backup_coordination;
SCOPE_EXIT({
if (backup_coordination && !backup_settings.internal)
backup_coordination->drop();
});
BackupMutablePtr backup;
ContextPtr cloned_context;
bool on_cluster = !backup_query->cluster.empty();
std::shared_ptr<BlockIO> on_cluster_io;
try
{
const ASTBackupQuery & backup_query = typeid_cast<const ASTBackupQuery &>(*query);
backup_info = BackupInfo::fromAST(*backup_query.backup_name);
backup_settings = BackupSettings::fromBackupQuery(backup_query);
auto access_to_check = getRequiredAccessToBackup(backup_query->elements, backup_settings);
if (!on_cluster)
context->checkAccess(access_to_check);
ClusterPtr cluster;
if (on_cluster)
{
backup_query->cluster = context->getMacros()->expand(backup_query->cluster);
cluster = context->getCluster(backup_query->cluster);
backup_settings.cluster_host_ids = cluster->getHostIDs();
if (backup_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
backup_settings.coordination_zk_path = root_zk_path + "/backup-" + toString(backup_uuid);
}
backup_settings.copySettingsToQuery(*backup_query);
}
if (!backup_settings.coordination_zk_path.empty())
backup_coordination = std::make_shared<BackupCoordinationDistributed>(
backup_settings.coordination_zk_path,
[global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); });
else
backup_coordination = std::make_shared<BackupCoordinationLocal>();
BackupFactory::CreateParams backup_create_params;
backup_create_params.open_mode = IBackup::OpenMode::WRITE;
backup_create_params.context = context;
backup_create_params.backup_info = backup_info;
backup_create_params.base_backup_info = backup_settings.base_backup_info;
backup_create_params.compression_method = backup_settings.compression_method;
backup_create_params.compression_level = backup_settings.compression_level;
backup_create_params.password = backup_settings.password;
backup_create_params.backup_uuid = backup_uuid;
backup_create_params.is_internal_backup = backup_settings.internal;
backup_create_params.backup_coordination = backup_coordination;
backup = BackupFactory::instance().createBackup(backup_create_params);
ContextMutablePtr mutable_context;
if (on_cluster || backup_settings.async)
cloned_context = mutable_context = Context::createCopy(context);
else
cloned_context = context; /// No need to clone context
if (on_cluster)
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = backup_settings.shard_num;
params.only_replica_num = backup_settings.replica_num;
params.access_to_check = access_to_check;
mutable_context->setSetting("distributed_ddl_task_timeout", -1); // No timeout
mutable_context->setSetting("distributed_ddl_output_mode", Field{"throw"});
auto res = executeDDLQueryOnCluster(backup_query, mutable_context, params);
on_cluster_io = std::make_shared<BlockIO>(std::move(res));
}
}
catch (...)
{
setStatus(backup_uuid, BackupStatus::FAILED_TO_BACKUP);
throw;
}
{
Info info;
info.uuid = uuid;
info.backup_name = backup_info.toString();
info.status = BackupStatus::MAKING_BACKUP;
info.status_changed_time = time(nullptr);
info.internal = backup_settings.internal;
std::lock_guard lock{infos_mutex};
infos.emplace(uuid, std::move(info));
}
auto job = [this, query, context, uuid]
auto job = [this,
backup,
backup_uuid,
backup_query,
backup_settings,
backup_coordination,
on_cluster_io,
cloned_context](bool in_separate_thread)
{
try
{
const ASTBackupQuery & backup_query = typeid_cast<const ASTBackupQuery &>(*query);
executeBackupImpl(backup_query, uuid, context, backups_thread_pool);
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::BACKUP_COMPLETE;
info.status_changed_time = time(nullptr);
if (on_cluster_io)
{
PullingPipelineExecutor executor(on_cluster_io->pipeline);
Block block;
while (executor.pull(block))
;
backup->finalizeWriting();
}
else
{
std::optional<CurrentThread::QueryScope> query_scope;
if (in_separate_thread)
query_scope.emplace(cloned_context);
backup_query->setDatabase(cloned_context->getCurrentDatabase());
auto timeout_for_preparing = std::chrono::seconds{cloned_context->getConfigRef().getInt("backups.backup_prepare_timeout", -1)};
auto backup_entries
= makeBackupEntries(cloned_context, backup_query->elements, backup_settings, backup_coordination, timeout_for_preparing);
writeBackupEntries(backup, std::move(backup_entries), backups_thread_pool);
}
setStatus(backup_uuid, BackupStatus::BACKUP_COMPLETE);
}
catch (...)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::FAILED_TO_BACKUP;
info.status_changed_time = time(nullptr);
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
setStatus(backup_uuid, BackupStatus::FAILED_TO_BACKUP);
if (!in_separate_thread)
throw;
}
};
if (backup_settings.async)
{
backups_thread_pool.scheduleOrThrowOnError(job);
}
backups_thread_pool.scheduleOrThrowOnError([job] { job(true); });
else
{
job();
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
if (info.status == BackupStatus::FAILED_TO_BACKUP)
std::rethrow_exception(info.exception);
}
job(false);
return uuid;
return backup_uuid;
}
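/// The scheduling above reduces to the following pattern, shown here as a hypothetical
/// free-standing sketch (`pool` and `async` stand in for the worker's thread pool and
/// the query's `async` setting): the same job runs either inline or on a pool thread,
/// and only the pool thread needs its own query scope.
void scheduleJobExample(ThreadPool & pool, ContextMutablePtr cloned_context, bool async)
{
    auto job = [cloned_context](bool in_separate_thread)
    {
        std::optional<CurrentThread::QueryScope> query_scope;
        if (in_separate_thread)
            query_scope.emplace(cloned_context);
        /// ... the actual BACKUP or RESTORE work goes here ...
    };

    if (async)
        pool.scheduleOrThrowOnError([job] { job(/* in_separate_thread = */ true); });
    else
        job(/* in_separate_thread = */ false); /// the synchronous path propagates exceptions directly
}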
UUID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr context)
{
UUID uuid = UUIDHelpers::generateV4();
UUID restore_uuid = UUIDHelpers::generateV4();
auto restore_query = std::static_pointer_cast<ASTBackupQuery>(query->clone());
auto backup_info = BackupInfo::fromAST(*restore_query->backup_name);
auto restore_settings = RestoreSettings::fromRestoreQuery(*restore_query);
BackupInfo backup_info;
RestoreSettings restore_settings;
addInfo(restore_uuid, backup_info.toString(), BackupStatus::RESTORING, restore_settings.internal);
std::shared_ptr<IRestoreCoordination> restore_coordination;
SCOPE_EXIT({
if (restore_coordination && !restore_settings.internal)
restore_coordination->drop();
});
ContextMutablePtr cloned_context;
std::shared_ptr<BlockIO> on_cluster_io;
bool on_cluster = !restore_query->cluster.empty();
try
{
const ASTBackupQuery & restore_query = typeid_cast<const ASTBackupQuery &>(*query);
backup_info = BackupInfo::fromAST(*restore_query.backup_name);
restore_settings = RestoreSettings::fromRestoreQuery(restore_query);
auto access_to_check = getRequiredAccessToRestore(restore_query->elements, restore_settings);
if (!on_cluster)
context->checkAccess(access_to_check);
ClusterPtr cluster;
if (on_cluster)
{
restore_query->cluster = context->getMacros()->expand(restore_query->cluster);
cluster = context->getCluster(restore_query->cluster);
restore_settings.cluster_host_ids = cluster->getHostIDs();
if (restore_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
restore_settings.coordination_zk_path = root_zk_path + "/restore-" + toString(restore_uuid);
}
restore_settings.copySettingsToQuery(*restore_query);
}
if (!restore_settings.coordination_zk_path.empty())
restore_coordination = std::make_shared<RestoreCoordinationDistributed>(
restore_settings.coordination_zk_path,
[global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); });
else
restore_coordination = std::make_shared<RestoreCoordinationLocal>();
if (on_cluster || restore_settings.async)
cloned_context = Context::createCopy(context);
else
cloned_context = context; /// No need to clone context
if (on_cluster)
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = restore_settings.shard_num;
params.only_replica_num = restore_settings.replica_num;
params.access_to_check = access_to_check;
cloned_context->setSetting("distributed_ddl_task_timeout", -1); // No timeout
cloned_context->setSetting("distributed_ddl_output_mode", Field{"throw"});
auto res = executeDDLQueryOnCluster(restore_query, cloned_context, params);
on_cluster_io = std::make_shared<BlockIO>(std::move(res));
}
}
catch (...)
{
setStatus(restore_uuid, BackupStatus::FAILED_TO_RESTORE);
throw;
}
{
Info info;
info.uuid = uuid;
info.backup_name = backup_info.toString();
info.status = BackupStatus::RESTORING;
info.status_changed_time = time(nullptr);
info.internal = restore_settings.internal;
std::lock_guard lock{infos_mutex};
infos.emplace(uuid, std::move(info));
}
auto job = [this, query, context, uuid]
auto job = [this,
backup_info,
restore_uuid,
restore_query,
restore_settings,
restore_coordination,
on_cluster_io,
cloned_context](bool in_separate_thread)
{
try
{
const ASTBackupQuery & restore_query = typeid_cast<const ASTBackupQuery &>(*query);
executeRestoreImpl(restore_query, uuid, context, restores_thread_pool);
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::RESTORED;
info.status_changed_time = time(nullptr);
if (on_cluster_io)
{
PullingPipelineExecutor executor(on_cluster_io->pipeline);
Block block;
while (executor.pull(block))
;
}
else
{
std::optional<CurrentThread::QueryScope> query_scope;
if (in_separate_thread)
query_scope.emplace(cloned_context);
restore_query->setDatabase(cloned_context->getCurrentDatabase());
BackupFactory::CreateParams backup_open_params;
backup_open_params.open_mode = IBackup::OpenMode::READ;
backup_open_params.context = cloned_context;
backup_open_params.backup_info = backup_info;
backup_open_params.base_backup_info = restore_settings.base_backup_info;
backup_open_params.password = restore_settings.password;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
auto timeout_for_restoring_metadata
= std::chrono::seconds{cloned_context->getConfigRef().getInt("backups.restore_metadata_timeout", -1)};
auto restore_tasks = makeRestoreTasks(
cloned_context, backup, restore_query->elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
restoreMetadata(restore_tasks, restore_settings, restore_coordination, timeout_for_restoring_metadata);
restoreData(restore_tasks, restores_thread_pool);
}
setStatus(restore_uuid, BackupStatus::RESTORED);
}
catch (...)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::FAILED_TO_RESTORE;
info.status_changed_time = time(nullptr);
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
setStatus(restore_uuid, BackupStatus::FAILED_TO_RESTORE);
if (!in_separate_thread)
throw;
}
};
if (restore_settings.async)
{
restores_thread_pool.scheduleOrThrowOnError(job);
}
restores_thread_pool.scheduleOrThrowOnError([job] { job(true); });
else
{
job();
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
if (info.status == BackupStatus::FAILED_TO_RESTORE)
std::rethrow_exception(info.exception);
}
job(false);
return uuid;
return restore_uuid;
}
void BackupsWorker::wait(const UUID & backup_or_restore_uuid)
void BackupsWorker::addInfo(const UUID & uuid, const String & backup_name, BackupStatus status, bool internal)
{
Info info;
info.uuid = uuid;
info.backup_name = backup_name;
info.status = status;
info.status_changed_time = time(nullptr);
info.internal = internal;
std::lock_guard lock{infos_mutex};
infos[uuid] = std::move(info);
}
void BackupsWorker::setStatus(const UUID & uuid, BackupStatus status)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = status;
info.status_changed_time = time(nullptr);
if ((status == BackupStatus::FAILED_TO_BACKUP) || (status == BackupStatus::FAILED_TO_RESTORE))
{
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
}
switch (status)
{
case BackupStatus::BACKUP_COMPLETE:
LOG_INFO(log, "{} {} was created successfully", (info.internal ? "Internal backup" : "Backup"), info.backup_name);
break;
case BackupStatus::FAILED_TO_BACKUP:
LOG_ERROR(log, "Failed to create {} {}", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
case BackupStatus::RESTORED:
LOG_INFO(log, "Restored from {} {} successfully", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
case BackupStatus::FAILED_TO_RESTORE:
LOG_ERROR(log, "Failed to restore from {} {}", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
default:
break;
}
}
void BackupsWorker::wait(const UUID & backup_or_restore_uuid, bool rethrow_exception)
{
std::unique_lock lock{infos_mutex};
status_changed.wait(lock, [&]
@ -316,7 +367,10 @@ void BackupsWorker::wait(const UUID & backup_or_restore_uuid)
auto it = infos.find(backup_or_restore_uuid);
if (it == infos.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "BackupsWorker: Unknown UUID {}", toString(backup_or_restore_uuid));
auto current_status = it->second.status;
const auto & info = it->second;
auto current_status = info.status;
if (rethrow_exception && ((current_status == BackupStatus::FAILED_TO_BACKUP) || (current_status == BackupStatus::FAILED_TO_RESTORE)))
std::rethrow_exception(info.exception);
return (current_status == BackupStatus::BACKUP_COMPLETE) || (current_status == BackupStatus::RESTORED);
});
}
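/// A hypothetical end-to-end sketch of the worker's API (the thread counts are
/// illustrative values):
void runBackupExample(const ASTPtr & backup_query, ContextMutablePtr context)
{
    BackupsWorker worker{/* num_backup_threads = */ 16, /* num_restore_threads = */ 16};
    UUID uuid = worker.start(backup_query, context);
    worker.wait(uuid); /// rethrow_exception defaults to true, so a failed backup throws here
}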
@ -345,10 +399,10 @@ void BackupsWorker::shutdown()
size_t num_active_restores = restores_thread_pool.active();
if (!num_active_backups && !num_active_restores)
return;
LOG_INFO(&Poco::Logger::get("BackupsWorker"), "Waiting for {} backup and {} restore tasks to be finished", num_active_backups, num_active_restores);
LOG_INFO(log, "Waiting for {} backup and {} restore tasks to be finished", num_active_backups, num_active_restores);
backups_thread_pool.wait();
restores_thread_pool.wait();
LOG_INFO(&Poco::Logger::get("BackupsWorker"), "All backup and restore tasks have finished");
LOG_INFO(log, "All backup and restore tasks have finished");
}
}

View File

@ -26,7 +26,7 @@ public:
/// Waits until a BACKUP or RESTORE query started by start() is finished.
/// The function returns immediately if the operation is already finished.
void wait(const UUID & backup_or_restore_uuid);
void wait(const UUID & backup_or_restore_uuid, bool rethrow_exception = true);
/// Information about executing a BACKUP or RESTORE query started by calling start().
struct Info
@ -54,12 +54,16 @@ private:
UUID startMakingBackup(const ASTPtr & query, const ContextPtr & context);
UUID startRestoring(const ASTPtr & query, ContextMutablePtr context);
void addInfo(const UUID & uuid, const String & backup_name, BackupStatus status, bool internal);
void setStatus(const UUID & uuid, BackupStatus status);
ThreadPool backups_thread_pool;
ThreadPool restores_thread_pool;
std::unordered_map<UUID, Info> infos;
std::condition_variable status_changed;
mutable std::mutex infos_mutex;
const Poco::Logger * log;
};
}

View File

@ -303,14 +303,14 @@ void DDLRenamingSettings::setFromBackupQuery(const ASTBackupQuery::Elements & ba
{
const String & table_name = element.name.second;
String database_name = element.name.first;
if (element.name_is_in_temp_db)
if (element.is_temp_db)
database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!table_name.empty());
assert(!database_name.empty());
const String & new_table_name = element.new_name.second;
String new_database_name = element.new_name.first;
if (element.new_name_is_in_temp_db)
if (element.is_temp_db)
new_database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!new_table_name.empty());
assert(!new_database_name.empty());
@ -322,12 +322,12 @@ void DDLRenamingSettings::setFromBackupQuery(const ASTBackupQuery::Elements & ba
case ASTBackupQuery::DATABASE:
{
String database_name = element.name.first;
if (element.name_is_in_temp_db)
if (element.is_temp_db)
database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!database_name.empty());
String new_database_name = element.new_name.first;
if (element.new_name_is_in_temp_db)
if (element.is_temp_db)
new_database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!new_database_name.empty());

View File

@ -8,7 +8,7 @@
namespace DB
{
class IBackupEntry;
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
/// Represents a backup, i.e. a storage of BackupEntries which can be accessed by their names.
/// A backup can be either incremental or non-incremental. An incremental backup doesn't store

View File

@ -6,6 +6,7 @@
namespace DB
{
using DatabaseAndTableName = std::pair<String, String>;
/// Keeps information about files contained in a backup.
class IBackupCoordination
@ -13,6 +14,44 @@ class IBackupCoordination
public:
virtual ~IBackupCoordination() = default;
/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function, and then all the added paths can be retrieved with
/// getReplicatedTableDataPaths().
virtual void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) = 0;
struct PartNameAndChecksum
{
String part_name;
UInt128 checksum;
};
/// Adds part names which a specified replica of a replicated table is going to put to the backup.
/// Multiple replicas of the replicated table call this function, and then the added part names can be retrieved with
/// getReplicatedTablePartNames().
/// Checksums are used only to check that parts under the same names on different replicas are identical.
virtual void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
= 0;
/// Sets that a specified host finished preparations for copying the backup's files, successfully or not.
/// `error_message` should be non-empty if it was not successful.
virtual void finishPreparing(const String & host_id, const String & error_message = {}) = 0;
/// Waits for a specified time for specified hosts to finish preparation for copying the backup's files.
virtual void
waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const = 0;
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedTableDataPath()).
virtual Strings getReplicatedTableDataPaths(const String & table_zk_path) const = 0;
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as was added via addReplicatedTablePartNames(), but without duplicates and without
/// parts covered by other parts.
virtual Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const = 0;
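/// A hypothetical usage sketch for the two calls above, assuming `coordination` is the
/// IBackupCoordination instance shared by all replicas (part names and checksums are
/// made up; checksums are shown as 0 for brevity):
///
///     coordination.addReplicatedTablePartNames(host_id, {"db", "table"}, table_zk_path,
///                                              {{"all_1_1_0", 0}, {"all_2_2_0", 0}});
///     /// ... after every replica has contributed ...
///     Strings part_names = coordination.getReplicatedTablePartNames(host_id, {"db", "table"}, table_zk_path);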
struct FileInfo
{
String file_name;

View File

@ -26,7 +26,7 @@ public:
virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer() const = 0;
};
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
using BackupEntries = std::vector<std::pair<String, BackupEntryPtr>>;
}

View File

@ -30,11 +30,18 @@ public:
= 0;
/// Wait for another host to create a table in a replicated database.
virtual void waitForCreatingTableInReplicatedDB(
virtual void waitForTableCreatedInReplicatedDB(
const String & database_name,
const String & database_zk_path,
const String & table_name,
std::chrono::seconds timeout = std::chrono::seconds::zero())
std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */)
= 0;
/// Adds a path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
virtual void addReplicatedTableDataPath(
const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & data_path_in_backup)
= 0;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
@ -42,14 +49,8 @@ public:
virtual void finishRestoringMetadata(const String & host_id, const String & error_message = {}) = 0;
/// Waits for a specified list of hosts to finish restoring their metadata.
virtual void waitForAllHostsToRestoreMetadata(const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds::zero()) const = 0;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
virtual void setReplicatedTableDataPath(
const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & data_path_in_backup)
= 0;
virtual void waitForAllHostsRestoredMetadata(
const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const = 0;
/// Gets path in backup used by a replicated table.
virtual String getReplicatedTableDataPath(const String & table_zk_path) const = 0;
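/// A hypothetical illustration of the selection rule described above, with made-up
/// host ids and paths:
///
///     coordination.addReplicatedTableDataPath("host2:9000", {"db", "t"}, table_zk_path, "data/db/t_from_host2/");
///     coordination.addReplicatedTableDataPath("host1:9000", {"db", "t"}, table_zk_path, "data/db/t_from_host1/");
///     /// "host1:9000" sorts first, so:
///     String path = coordination.getReplicatedTableDataPath(table_zk_path); /// "data/db/t_from_host1/"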

View File

@ -15,12 +15,189 @@ namespace DB
namespace ErrorCodes
{
extern const int FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE;
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
}
namespace
{
struct TableInReplicatedDatabaseStatus
struct ReplicatedTableDataPath
{
String host_id;
DatabaseAndTableName table_name;
String data_path_in_backup;
String serialize() const
{
WriteBufferFromOwnString out;
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(data_path_in_backup, out);
return out.str();
}
static ReplicatedTableDataPath deserialize(const String & str)
{
ReadBufferFromString in{str};
ReplicatedTableDataPath res;
readBinary(res.host_id, in);
readBinary(res.table_name.first, in);
readBinary(res.table_name.second, in);
readBinary(res.data_path_in_backup, in);
return res;
}
};
}
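/// A round-trip sketch for the serialization helpers above (all values are illustrative):
void replicatedTableDataPathExample()
{
    ReplicatedTableDataPath info{"host1:9000", {"db", "table"}, "data/db/table/"};
    String blob = info.serialize();
    ReplicatedTableDataPath restored = ReplicatedTableDataPath::deserialize(blob);
    /// `restored` now carries the same host_id, table_name and data_path_in_backup.
}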
class RestoreCoordinationDistributed::ReplicatedDatabasesMetadataSync
{
public:
ReplicatedDatabasesMetadataSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_), log(&Poco::Logger::get("RestoreCoordination"))
{
createRootNodes();
}
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTable(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_);
zookeeper->createIfNotExists(path, "");
TableStatus status;
status.host_id = host_id_;
status.table_name = DatabaseAndTableName{database_name_, table_name_};
path += "/" + escapeForFileName(table_name_);
auto code = zookeeper->tryCreate(path, status.serialize(), zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
throw zkutil::KeeperException(code, path);
return (code == Coordination::Error::ZOK);
}
/// Sets that we have either created a table in a replicated database or failed to do so.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForTableCreated()).
void finishCreatingTable(
const String & /* host_id_ */,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
else
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name_, table_name_, error_message_);
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
auto status = TableStatus::deserialize(zookeeper->get(path));
status.error_message = error_message_;
status.ready = error_message_.empty();
zookeeper->set(path, status.serialize());
}
/// Wait for another host to create a table in a replicated database.
void waitForTableCreated(
const String & /* database_name_ */, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableStatus status;
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() >= 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (true)
{
if (use_timeout && (time_left.count() <= 0))
{
status = TableStatus::deserialize(zookeeper->get(path));
break;
}
watch_set = true;
status = TableStatus::deserialize(zookeeper->getWatch(path, nullptr, watch_callback));
if (!status.error_message.empty() || status.ready)
break;
LOG_TRACE(log, "Waiting for host {} to create table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
if (!watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered))
break;
time_left -= (std::chrono::steady_clock::now() - start_time);
}
else
watch_triggered_event.wait(dummy_lock, watch_triggered);
}
}
if (watch_set)
{
/// Remove watch by triggering it.
++status.increment;
zookeeper->set(path, status.serialize());
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
if (!status.error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} failed to create table {}.{}: {}", status.host_id, status.table_name.first, status.table_name.second, status.error_message);
if (status.ready)
{
LOG_TRACE(log, "Host {} created table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
return;
}
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} was unable to create table {}.{} in {}",
status.host_id,
status.table_name.first,
table_name_,
to_string(timeout_));
}
private:
void createRootNodes()
{
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
}
struct TableStatus
{
String host_id;
DatabaseAndTableName table_name;
@ -28,54 +205,44 @@ namespace
String error_message;
size_t increment = 0;
void write(WriteBuffer & out) const
String serialize() const
{
WriteBufferFromOwnString out;
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(ready, out);
writeBinary(error_message, out);
writeBinary(increment, out);
return out.str();
}
void read(ReadBuffer & in)
static TableStatus deserialize(const String & str)
{
readBinary(host_id, in);
readBinary(table_name.first, in);
readBinary(table_name.second, in);
readBinary(ready, in);
readBinary(error_message, in);
readBinary(increment, in);
ReadBufferFromString in{str};
TableStatus res;
readBinary(res.host_id, in);
readBinary(res.table_name.first, in);
readBinary(res.table_name.second, in);
readBinary(res.ready, in);
readBinary(res.error_message, in);
readBinary(res.increment, in);
return res;
}
};
struct ReplicatedTableDataPath
{
String host_id;
DatabaseAndTableName table_name;
String data_path_in_backup;
void write(WriteBuffer & out) const
{
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(data_path_in_backup, out);
}
void read(ReadBuffer & in)
{
readBinary(host_id, in);
readBinary(table_name.first, in);
readBinary(table_name.second, in);
readBinary(data_path_in_backup, in);
}
};
}
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
};
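/// The time-budget loops in this file follow one generic pattern, sketched here as a
/// self-contained helper (this helper is an illustration, not part of the class, and
/// assumes <chrono>, <condition_variable> and <mutex> are available): wait on a
/// condition while subtracting elapsed time from the remaining budget, where a
/// negative budget means "no timeout".
template <typename Predicate>
bool waitWithTimeBudget(std::condition_variable & event, std::mutex & mutex, std::chrono::seconds timeout, Predicate done)
{
    bool use_timeout = (timeout.count() >= 0);
    std::chrono::steady_clock::duration time_left = timeout;
    std::unique_lock lock{mutex};
    while (!done())
    {
        if (!use_timeout)
        {
            event.wait(lock);
            continue;
        }
        if (time_left.count() <= 0)
            return false;
        auto start_time = std::chrono::steady_clock::now();
        event.wait_for(lock, time_left);
        time_left -= (std::chrono::steady_clock::now() - start_time);
    }
    return true;
}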
RestoreCoordinationDistributed::RestoreCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_), log(&Poco::Logger::get("RestoreCoordinationDistributed"))
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, replicated_databases_metadata_sync(
std::make_unique<ReplicatedDatabasesMetadataSync>(zookeeper_path_ + "/repl_databases_metadata", get_zookeeper_))
, all_metadata_barrier(zookeeper_path_ + "/all_metadata", get_zookeeper_, "RestoreCoordination", "restoring metadata")
{
createRootNodes();
}
@ -87,9 +254,7 @@ void RestoreCoordinationDistributed::createRootNodes()
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
zookeeper->createIfNotExists(zookeeper_path + "/tables_in_repl_databases", "");
zookeeper->createIfNotExists(zookeeper_path + "/metadata_ready", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_data_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_partitions", "");
}
@ -100,302 +265,54 @@ void RestoreCoordinationDistributed::removeAllNodes()
}
bool RestoreCoordinationDistributed::startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_)
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_);
zookeeper->createIfNotExists(path, "");
TableInReplicatedDatabaseStatus status;
status.host_id = host_id_;
status.table_name = DatabaseAndTableName{database_name_, table_name_};
String status_str;
{
WriteBufferFromOwnString buf;
status.write(buf);
status_str = buf.str();
}
path += "/" + escapeForFileName(table_name_);
auto code = zookeeper->tryCreate(path, status_str, zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
throw zkutil::KeeperException(code, path);
return (code == Coordination::Error::ZOK);
return replicated_databases_metadata_sync->startCreatingTable(host_id, database_name, database_zk_path, table_name);
}
/// Finishes creating a table in a replicated database, successfully or with an error.
/// In the latter case `error_message` should be set.
void RestoreCoordinationDistributed::finishCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_)
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
else
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name_, table_name_, error_message_);
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableInReplicatedDatabaseStatus status;
String status_str = zookeeper->get(path);
{
ReadBufferFromString buf{status_str};
status.read(buf);
}
status.error_message = error_message_;
status.ready = error_message_.empty();
{
WriteBufferFromOwnString buf;
status.write(buf);
status_str = buf.str();
}
zookeeper->set(path, status_str);
return replicated_databases_metadata_sync->finishCreatingTable(host_id, database_name, database_zk_path, table_name, error_message);
}
/// Wait for another host to create a table in a replicated database.
void RestoreCoordinationDistributed::waitForCreatingTableInReplicatedDB(
const String & /* database_name_ */, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_)
void RestoreCoordinationDistributed::waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableInReplicatedDatabaseStatus status;
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() > 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (!use_timeout || (time_left.count() > 0))
{
watch_set = true;
String status_str = zookeeper->getWatch(path, nullptr, watch_callback);
{
ReadBufferFromString buf{status_str};
status.read(buf);
}
if (!status.error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} failed to create table {}.{}: {}", status.host_id, status.table_name.first, status.table_name.second, status.error_message);
if (status.ready)
{
LOG_TRACE(log, "Host {} created table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
return;
}
LOG_TRACE(log, "Waiting for host {} to create table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
std::chrono::steady_clock::time_point start_time;
if (use_timeout)
start_time = std::chrono::steady_clock::now();
bool waited;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
waited = watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered);
}
else
{
watch_triggered_event.wait(dummy_lock, watch_triggered);
waited = true;
}
}
if (use_timeout)
{
time_left -= (std::chrono::steady_clock::now() - start_time);
if (time_left.count() < 0)
time_left = std::chrono::steady_clock::duration::zero();
}
if (!waited)
break;
}
if (watch_set)
{
/// Remove watch by triggering it.
++status.increment;
WriteBufferFromOwnString buf;
status.write(buf);
zookeeper->set(path, buf.str());
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to create table {}.{} in {}",
status.host_id,
status.table_name.first,
table_name_,
to_string(timeout_));
return replicated_databases_metadata_sync->waitForTableCreated(database_name, database_zk_path, table_name, timeout);
}
void RestoreCoordinationDistributed::finishRestoringMetadata(const String & host_id_, const String & error_message_)
void RestoreCoordinationDistributed::finishRestoringMetadata(const String & host_id, const String & error_message)
{
LOG_TRACE(log, "Finished restoring metadata{}", (error_message_.empty() ? "" : (" with error " + error_message_)));
auto zookeeper = get_zookeeper();
if (error_message_.empty())
zookeeper->create(zookeeper_path + "/metadata_ready/" + host_id_ + ":ready", "", zkutil::CreateMode::Persistent);
else
zookeeper->create(zookeeper_path + "/metadata_ready/" + host_id_ + ":error", error_message_, zkutil::CreateMode::Persistent);
all_metadata_barrier.finish(host_id, error_message);
}
void RestoreCoordinationDistributed::waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const
void RestoreCoordinationDistributed::waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const
{
auto zookeeper = get_zookeeper();
bool all_hosts_ready = false;
String not_ready_host_id;
String error_host_id;
String error_message;
/// Determines whether everything's ready, whether some host failed, or whether we need to wait more.
auto process_nodes = [&](const Strings & nodes)
{
std::unordered_set<std::string_view> set{nodes.begin(), nodes.end()};
for (const String & host_id : host_ids_)
{
if (set.contains(host_id + ":error"))
{
error_host_id = host_id;
error_message = zookeeper->get(zookeeper_path + "/metadata_ready/" + host_id + ":error");
return;
}
if (!set.contains(host_id + ":ready"))
{
LOG_TRACE(log, "Waiting for host {} to restore its metadata", host_id);
not_ready_host_id = host_id;
return;
}
}
all_hosts_ready = true;
};
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getChildrenWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() > 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (!use_timeout || (time_left.count() > 0))
{
watch_set = true;
Strings children = zookeeper->getChildrenWatch(zookeeper_path + "/metadata_ready", nullptr, watch_callback);
process_nodes(children);
if (!error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to restore its metadata: {}",
error_host_id,
error_message);
if (all_hosts_ready)
{
LOG_TRACE(log, "All hosts have finished restoring metadata");
return;
}
std::chrono::steady_clock::time_point start_time;
if (use_timeout)
start_time = std::chrono::steady_clock::now();
bool waited;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
waited = watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered);
}
else
{
watch_triggered_event.wait(dummy_lock, watch_triggered);
waited = true;
}
}
if (use_timeout)
{
time_left -= (std::chrono::steady_clock::now() - start_time);
if (time_left.count() < 0)
time_left = std::chrono::steady_clock::duration::zero();
}
if (!waited)
break;
}
if (watch_set)
{
/// Remove watch by triggering it.
zookeeper->create(zookeeper_path + "/metadata_ready/remove_watch-", "", zkutil::CreateMode::EphemeralSequential);
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to restore its metadata in {}",
not_ready_host_id,
to_string(timeout_));
all_metadata_barrier.waitForAllHostsToFinish(host_ids, timeout);
}
void RestoreCoordinationDistributed::setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_)
void RestoreCoordinationDistributed::addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_data_paths/" + escapeForFileName(table_zk_path_);
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path);
String new_info_str;
{
ReplicatedTableDataPath new_info;
new_info.host_id = host_id_;
new_info.table_name = table_name_;
new_info.data_path_in_backup = data_path_in_backup_;
WriteBufferFromOwnString buf;
new_info.write(buf);
new_info_str = buf.str();
}
ReplicatedTableDataPath new_info;
new_info.host_id = host_id;
new_info.table_name = table_name;
new_info.data_path_in_backup = data_path_in_backup;
String new_info_str = new_info.serialize();
auto code = zookeeper->tryCreate(path, new_info_str, zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
@ -404,11 +321,8 @@ void RestoreCoordinationDistributed::setReplicatedTableDataPath(
while (code != Coordination::Error::ZOK)
{
Coordination::Stat stat;
String cur_info_str = zookeeper->get(path, &stat);
ReadBufferFromString buf{cur_info_str};
ReplicatedTableDataPath cur_info;
cur_info.read(buf);
if ((cur_info.host_id < host_id_) || ((cur_info.host_id == host_id_) && (cur_info.table_name <= table_name_)))
ReplicatedTableDataPath cur_info = ReplicatedTableDataPath::deserialize(zookeeper->get(path, &stat));
if ((cur_info.host_id < host_id) || ((cur_info.host_id == host_id) && (cur_info.table_name <= table_name)))
break;
code = zookeeper->trySet(path, new_info_str, stat.version);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZBADVERSION))
@ -419,11 +333,8 @@ void RestoreCoordinationDistributed::setReplicatedTableDataPath(
String RestoreCoordinationDistributed::getReplicatedTableDataPath(const String & table_zk_path_) const
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_data_paths/" + escapeForFileName(table_zk_path_);
String info_str = zookeeper->get(path);
ReadBufferFromString buf{info_str};
ReplicatedTableDataPath info;
info.read(buf);
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path_);
auto info = ReplicatedTableDataPath::deserialize(zookeeper->get(path));
return info.data_path_in_backup;
}

View File

@ -1,6 +1,7 @@
#pragma once
#include <Backups/IRestoreCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <Common/ZooKeeper/Common.h>
@ -11,42 +12,42 @@ namespace DB
class RestoreCoordinationDistributed : public IRestoreCoordination
{
public:
RestoreCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_);
RestoreCoordinationDistributed(const String & zookeeper_path, zkutil::GetZooKeeper get_zookeeper);
~RestoreCoordinationDistributed() override;
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_) override;
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name) override;
/// Sets that we have either created a table in a replicated database or failed to do so.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForTableCreatedInReplicatedDB()).
void finishCreatingTableInReplicatedDB(
const String & host_id_,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_) override;
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message) override;
/// Wait for another host to create a table in a replicated database.
void waitForCreatingTableInReplicatedDB(
const String & database_name_, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id_, const String & error_message_) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws if the time is out.
void waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const override;
void waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout) override;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
void setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_) override;
void addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id, const String & error_message) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws if the time is out.
void waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const override;
/// Gets path in backup used by a replicated table.
String getReplicatedTableDataPath(const String & table_zk_path) const override;
@ -54,10 +55,10 @@ public:
/// Sets that this replica is going to restore a partition in a replicated table.
/// The function returns false if this partition is being already restored by another replica.
bool startInsertingDataToPartitionInReplicatedTable(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & partition_name_) override;
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & partition_name) override;
/// Removes remotely stored information.
void drop() override;
@ -66,9 +67,12 @@ private:
void createRootNodes();
void removeAllNodes();
class ReplicatedDatabasesMetadataSync;
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
std::unique_ptr<ReplicatedDatabasesMetadataSync> replicated_databases_metadata_sync;
BackupCoordinationDistributedBarrier all_metadata_barrier;
};
}

View File

@ -3,7 +3,6 @@
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <base/chrono_io.h>
#include <boost/range/adaptor/map.hpp>
namespace DB
@ -16,73 +15,73 @@ namespace ErrorCodes
RestoreCoordinationLocal::RestoreCoordinationLocal()
: log(&Poco::Logger::get("RestoreCoordinationLocal"))
: log(&Poco::Logger::get("RestoreCoordination"))
{}
RestoreCoordinationLocal::~RestoreCoordinationLocal() = default;
bool RestoreCoordinationLocal::startCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & /* database_name_ */,
const String & /* database_zk_path_*/,
const String & /* table_name_ */)
const String & /* host_id */,
const String & /* database_name */,
const String & /* database_zk_path */,
const String & /* table_name */)
{
return true;
}
void RestoreCoordinationLocal::finishCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & database_name_,
const String & /* database_zk_path_ */,
const String & table_name_,
const String & error_message_)
const String & /* host_id */,
const String & database_name,
const String & /* database_zk_path */,
const String & table_name,
const String & error_message)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
if (error_message.empty())
LOG_TRACE(log, "Created table {}.{}", database_name, table_name);
else
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name_, table_name_, error_message_);
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name, table_name, error_message);
}
/// Wait for another host to create a table in a replicated database.
void RestoreCoordinationLocal::waitForCreatingTableInReplicatedDB(
const String & /* database_name_ */,
const String & /* database_zk_path_ */,
const String & /* table_name_ */,
std::chrono::seconds /* timeout_ */)
void RestoreCoordinationLocal::waitForTableCreatedInReplicatedDB(
const String & /* database_name */,
const String & /* database_zk_path */,
const String & /* table_name */,
std::chrono::seconds /* timeout */)
{
}
void RestoreCoordinationLocal::finishRestoringMetadata(const String & /* host_id */, const String & error_message_)
void RestoreCoordinationLocal::finishRestoringMetadata(const String & /* host_id */, const String & error_message)
{
LOG_TRACE(log, "Finished restoring metadata{}", (error_message_.empty() ? "" : (" with error " + error_message_)));
LOG_TRACE(log, "Finished restoring metadata{}", (error_message.empty() ? "" : (" with error " + error_message)));
}
void RestoreCoordinationLocal::waitForAllHostsToRestoreMetadata(const Strings & /* host_ids_ */, std::chrono::seconds /* timeout_ */) const
void RestoreCoordinationLocal::waitForAllHostsRestoredMetadata(const Strings & /* host_ids */, std::chrono::seconds /* timeout */) const
{
}
void RestoreCoordinationLocal::setReplicatedTableDataPath(const String & /* host_id_ */,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_)
void RestoreCoordinationLocal::addReplicatedTableDataPath(const String & /* host_id */,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup)
{
std::lock_guard lock{mutex};
auto it = replicated_tables_data_paths.find(table_zk_path_);
auto it = replicated_tables_data_paths.find(table_zk_path);
if (it == replicated_tables_data_paths.end())
{
ReplicatedTableDataPath new_info;
new_info.table_name = table_name_;
new_info.data_path_in_backup = data_path_in_backup_;
replicated_tables_data_paths.emplace(table_zk_path_, std::move(new_info));
new_info.table_name = table_name;
new_info.data_path_in_backup = data_path_in_backup;
replicated_tables_data_paths.emplace(table_zk_path, std::move(new_info));
return;
}
else
{
auto & cur_info = it->second;
if (table_name_ < cur_info.table_name)
if (table_name < cur_info.table_name)
{
cur_info.table_name = table_name_;
cur_info.data_path_in_backup = data_path_in_backup_;
cur_info.table_name = table_name;
cur_info.data_path_in_backup = data_path_in_backup;
}
}
}
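The merge rule above is easy to miss on a first read: for a given ZooKeeper path, whichever table name sorts first lexicographically supplies the data path. A minimal standalone sketch of that rule, with illustrative names and a plain std::map standing in for `replicated_tables_data_paths`:

#include <iostream>
#include <map>
#include <string>
#include <utility>

int main()
{
    /// zk_path -> {table_name, data_path_in_backup}; the smallest table_name wins.
    std::map<std::string, std::pair<std::string, std::string>> paths;

    auto add = [&](const std::string & zk_path, const std::string & table, const std::string & data_path)
    {
        auto it = paths.find(zk_path);
        if (it == paths.end() || table < it->second.first)
            paths[zk_path] = {table, data_path};
    };

    add("/clickhouse/tables/t", "db2.t", "shards/1/replicas/2/data/db2/t/");
    add("/clickhouse/tables/t", "db1.t", "shards/1/replicas/1/data/db1/t/");

    /// Prints the path registered for the lexicographically first name, db1.t.
    std::cout << paths["/clickhouse/tables/t"].second << '\n';
}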
@ -97,12 +96,12 @@ String RestoreCoordinationLocal::getReplicatedTableDataPath(const String & table
}
bool RestoreCoordinationLocal::startInsertingDataToPartitionInReplicatedTable(
const String & /* host_id_ */, const DatabaseAndTableName & table_name_, const String & table_zk_path_, const String & partition_name_)
const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & partition_name)
{
std::lock_guard lock{mutex};
auto key = std::pair{table_zk_path_, partition_name_};
auto it = replicated_tables_partitions.try_emplace(std::move(key), table_name_).first;
return it->second == table_name_;
auto key = std::pair{table_zk_path, partition_name};
auto it = replicated_tables_partitions.try_emplace(std::move(key), table_name).first;
return it->second == table_name;
}
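The partition-claim logic is a small `try_emplace` idiom: the first caller inserts its name under the `{zk_path, partition}` key, and every later caller merely compares against the stored value, so re-claims by the original claimant stay true. A self-contained sketch with illustrative names:

#include <cassert>
#include <map>
#include <string>
#include <utility>

int main()
{
    /// {table_zk_path, partition_name} -> table that claimed the partition.
    std::map<std::pair<std::string, std::string>, std::string> claims;

    auto start_inserting = [&](const std::string & zk, const std::string & partition, const std::string & table)
    {
        auto it = claims.try_emplace({zk, partition}, table).first;
        return it->second == table; /// true for the claimant only, and idempotently so
    };

    assert(start_inserting("/zk/t", "202205", "db.table_a"));   /// first claim wins
    assert(!start_inserting("/zk/t", "202205", "db.table_b"));  /// already claimed
    assert(start_inserting("/zk/t", "202205", "db.table_a"));   /// re-claim stays true
    return 0;
}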
}

View File

@ -20,48 +20,48 @@ public:
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_) override;
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name) override;
/// Sets that either we have created a table in a replicated database or failed to do so.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForTableCreatedInReplicatedDB()).
void finishCreatingTableInReplicatedDB(
const String & host_id_,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_) override;
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message) override;
/// Wait for another host to create a table in a replicated database.
void waitForCreatingTableInReplicatedDB(
const String & database_name_, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id_, const String & error_message_) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws an exception if the timeout is exceeded.
void waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const override;
void waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout) override;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
void setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_) override;
void addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id, const String & error_message) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws an exception if the timeout is exceeded.
void waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const override;
/// Gets path in backup used by a replicated table.
String getReplicatedTableDataPath(const String & table_zk_path_) const override;
String getReplicatedTableDataPath(const String & table_zk_path) const override;
/// Sets that this replica is going to restore a partition in a replicated table.
/// The function returns false if this partition is already being restored by another replica.
bool startInsertingDataToPartitionInReplicatedTable(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & partition_name_) override;
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & partition_name) override;
private:
struct ReplicatedTableDataPath

View File

@ -34,8 +34,7 @@ using RestoreDatabaseCreationMode = RestoreTableCreationMode;
struct RestoreSettings : public StorageRestoreSettings
{
/// Base backup, with this setting we can override the location of the base backup while restoring.
/// Any incremental backup keeps the information about its base backup inside it,
/// so using this setting is optional.
/// Any incremental backup keeps the information about its base backup inside it, so using this setting is optional.
std::optional<BackupInfo> base_backup_info;
/// Password used to decrypt the backup.

View File

@ -24,9 +24,7 @@
#include <Storages/StorageReplicatedMergeTree.h>
#include <base/chrono_io.h>
#include <base/insertAtEnd.h>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <filesystem>
namespace DB
@ -43,7 +41,7 @@ namespace
class PathsInBackup
{
public:
explicit PathsInBackup(const IBackup & backup_) : backup(backup_) {}
explicit PathsInBackup(const IBackup & backup_) : backup(backup_) { }
std::vector<size_t> getShards() const
{
@ -96,7 +94,9 @@ namespace
std::vector<String> res;
String escaped_database_name = escapeForFileName(database_name);
insertAtEnd(res, backup.listFiles(fmt::format("shards/{}/replicas/{}/metadata/{}/", shard_index, replica_index, escaped_database_name)));
insertAtEnd(
res,
backup.listFiles(fmt::format("shards/{}/replicas/{}/metadata/{}/", shard_index, replica_index, escaped_database_name)));
insertAtEnd(res, backup.listFiles(fmt::format("shards/{}/metadata/{}/", shard_index, escaped_database_name)));
insertAtEnd(res, backup.listFiles(fmt::format("metadata/{}/", escaped_database_name)));
@ -172,10 +172,7 @@ namespace
class RestoreDatabaseTask : public IRestoreTask
{
public:
RestoreDatabaseTask(
ContextMutablePtr context_,
const ASTPtr & create_query_,
const RestoreSettingsPtr & restore_settings_)
RestoreDatabaseTask(ContextMutablePtr context_, const ASTPtr & create_query_, const RestoreSettingsPtr & restore_settings_)
: context(context_)
, create_query(typeid_cast<std::shared_ptr<ASTCreateQuery>>(create_query_))
, restore_settings(restore_settings_)
@ -201,6 +198,7 @@ namespace
auto cloned_create_query = typeid_cast<std::shared_ptr<ASTCreateQuery>>(create_query->clone());
cloned_create_query->if_not_exists = (restore_settings->create_database == RestoreDatabaseCreationMode::kCreateIfNotExists);
InterpreterCreateQuery create_interpreter{cloned_create_query, context};
create_interpreter.setInternal(true);
create_interpreter.execute();
}
@ -244,52 +242,6 @@ namespace
};
class RestoreTableDataTask : public IRestoreTask
{
public:
RestoreTableDataTask(
ContextMutablePtr context_,
StoragePtr storage_,
const ASTs & partitions_,
const BackupPtr & backup_,
const String & data_path_in_backup_,
const RestoreSettingsPtr & restore_settings_,
const std::shared_ptr<IRestoreCoordination> & restore_coordination_)
: context(context_)
, storage(storage_)
, partitions(partitions_)
, backup(backup_)
, data_path_in_backup(data_path_in_backup_)
, restore_settings(restore_settings_)
, restore_coordination(restore_coordination_)
{
}
RestoreTasks run() override
{
const auto * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage.get());
if (replicated_table)
{
data_path_in_backup = restore_coordination->getReplicatedTableDataPath(
replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath());
}
RestoreTasks tasks;
tasks.emplace_back(storage->restoreData(context, partitions, backup, data_path_in_backup, *restore_settings, restore_coordination));
return tasks;
}
private:
ContextMutablePtr context;
StoragePtr storage;
ASTs partitions;
BackupPtr backup;
String data_path_in_backup;
RestoreSettingsPtr restore_settings;
std::shared_ptr<IRestoreCoordination> restore_coordination;
};
/// Restores a table.
class RestoreTableTask : public IRestoreTask
{
@ -393,7 +345,8 @@ namespace
if (!replicated_database)
return;
restore_coordination->waitForCreatingTableInReplicatedDB(table_name.first, replicated_database->getZooKeeperPath(), table_name.second);
restore_coordination->waitForTableCreatedInReplicatedDB(
table_name.first, replicated_database->getZooKeeperPath(), table_name.second);
/// The table `table_name` was created on another host and must be in the replicated database's queue;
/// we have to wait until the replicated database syncs it.
@ -402,7 +355,8 @@ namespace
bool use_timeout = (timeout_for_restoring_metadata.count() > 0);
while (!database->isTableExist(table_name.second, context))
{
if (replicated_database_synced || (use_timeout && (std::chrono::steady_clock::now() - start_time) >= timeout_for_restoring_metadata))
if (replicated_database_synced
|| (use_timeout && (std::chrono::steady_clock::now() - start_time) >= timeout_for_restoring_metadata))
{
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE,
@ -423,7 +377,8 @@ namespace
if (!restore_settings->structure_only)
{
data_path_in_backup = PathsInBackup{*backup}.getDataPath(table_name_in_backup, restore_settings->shard_num_in_backup, restore_settings->replica_num_in_backup);
data_path_in_backup = PathsInBackup{*backup}.getDataPath(
table_name_in_backup, restore_settings->shard_num_in_backup, restore_settings->replica_num_in_backup);
has_data = !backup->listFiles(data_path_in_backup).empty();
const auto * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage.get());
@ -435,7 +390,7 @@ namespace
/// That's why we use the restore coordination here: on restoring metadata stage each replica sets its own
/// `data_path_in_backup` for same zookeeper path, and then the restore coordination choose one `data_path_in_backup`
/// to use for restoring data.
restore_coordination->setReplicatedTableDataPath(
restore_coordination->addReplicatedTableDataPath(
restore_settings->host_id,
table_name_in_backup,
replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath(),
@ -528,7 +483,8 @@ namespace
return {};
RestoreTasks tasks;
tasks.emplace_back(std::make_unique<RestoreTableDataTask>(context, storage, partitions, backup, data_path_in_backup, restore_settings, restore_coordination));
tasks.emplace_back(
storage->restoreData(context, partitions, backup, data_path_in_backup, *restore_settings, restore_coordination));
return tasks;
}
@ -579,21 +535,18 @@ namespace
{
switch (element.type)
{
case ElementType::TABLE:
{
case ElementType::TABLE: {
prepareToRestoreTable(element.name, element.partitions);
break;
}
case ElementType::DATABASE:
{
case ElementType::DATABASE: {
const String & database_name = element.name.first;
prepareToRestoreDatabase(database_name, element.except_list);
break;
}
case ElementType::ALL_DATABASES:
{
case ElementType::ALL_DATABASES: {
prepareToRestoreAllDatabases(element.except_list);
break;
}
@ -612,7 +565,15 @@ namespace
/// TODO: We need to restore tables according to their dependencies.
for (const auto & info : tables | boost::adaptors::map_values)
res.push_back(std::make_unique<RestoreTableTask>(context, info.create_query, info.partitions, backup, info.name_in_backup, restore_settings_ptr, restore_coordination, timeout_for_restoring_metadata));
res.push_back(std::make_unique<RestoreTableTask>(
context,
info.create_query,
info.partitions,
backup,
info.name_in_backup,
restore_settings_ptr,
restore_coordination,
timeout_for_restoring_metadata));
return res;
}
@ -645,12 +606,11 @@ namespace
{
if (replicas_in_backup.size() == 1)
restore_settings.replica_num_in_backup = replicas_in_backup[0];
else
else if (std::find(replicas_in_backup.begin(), replicas_in_backup.end(), replica_num) != replicas_in_backup.end())
restore_settings.replica_num_in_backup = replica_num;
else
restore_settings.replica_num_in_backup = replicas_in_backup[0];
}
if (std::find(replicas_in_backup.begin(), replicas_in_backup.end(), restore_settings.replica_num_in_backup) == replicas_in_backup.end())
throw Exception(ErrorCodes::BACKUP_ENTRY_NOT_FOUND, "No replica #{} in backup", restore_settings.replica_num_in_backup);
}
/// Prepares to restore a single table and probably its database's definition.
@ -659,7 +619,8 @@ namespace
/// Check that we are not trying to restore the same table again.
DatabaseAndTableName new_table_name = renaming_settings.getNewTableName(table_name_);
if (tables.contains(new_table_name))
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} twice", formatTableNameOrTemporaryTableName(new_table_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} twice", formatTableNameOrTemporaryTableName(new_table_name));
/// Make a create query for this table.
auto create_query = renameInCreateQuery(readCreateQueryFromBackup(table_name_));
@ -677,14 +638,19 @@ namespace
/// Check that we are not trying to restore the same database again.
String new_database_name = renaming_settings.getNewDatabaseName(database_name_);
if (databases.contains(new_database_name))
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} twice", backQuoteIfNeed(new_database_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} twice", backQuoteIfNeed(new_database_name));
Strings table_names = PathsInBackup{*backup}.getTables(database_name_, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
Strings table_names = PathsInBackup{*backup}.getTables(
database_name_, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
bool has_tables_in_backup = !table_names.empty();
bool has_create_query_in_backup = hasCreateQueryInBackup(database_name_);
if (!has_create_query_in_backup && !has_tables_in_backup)
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name_));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE,
"Cannot restore the database {} because there is no such database in the backup",
backQuoteIfNeed(database_name_));
/// Of course we're not going to restore the definition of the system or the temporary database.
if (!isSystemOrTemporaryDatabase(new_database_name))
@ -718,7 +684,8 @@ namespace
/// Prepares to restore all the databases contained in the backup.
void prepareToRestoreAllDatabases(const std::set<String> & except_list_)
{
for (const String & database_name : PathsInBackup{*backup}.getDatabases(restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup))
for (const String & database_name :
PathsInBackup{*backup}.getDatabases(restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup))
{
if (except_list_.contains(database_name))
continue;
@ -729,36 +696,46 @@ namespace
/// Reads a create query for creating a specified table from the backup.
std::shared_ptr<ASTCreateQuery> readCreateQueryFromBackup(const DatabaseAndTableName & table_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(table_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
table_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
if (!backup->fileExists(create_query_path))
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} because there is no such table in the backup",
formatTableNameOrTemporaryTableName(table_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE,
"Cannot restore the {} because there is no such table in the backup",
formatTableNameOrTemporaryTableName(table_name));
auto read_buffer = backup->readFile(create_query_path)->getReadBuffer();
String create_query_str;
readStringUntilEOF(create_query_str, *read_buffer);
read_buffer.reset();
ParserCreateQuery create_parser;
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(
parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
}
/// Reads a create query for creating a specified database from the backup.
std::shared_ptr<ASTCreateQuery> readCreateQueryFromBackup(const String & database_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
if (!backup->fileExists(create_query_path))
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE,
"Cannot restore the database {} because there is no such database in the backup",
backQuoteIfNeed(database_name));
auto read_buffer = backup->readFile(create_query_path)->getReadBuffer();
String create_query_str;
readStringUntilEOF(create_query_str, *read_buffer);
read_buffer.reset();
ParserCreateQuery create_parser;
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(
parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
}
/// Whether there is a create query for creating a specified database in the backup.
bool hasCreateQueryInBackup(const String & database_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
return backup->fileExists(create_query_path);
}
@ -799,17 +776,71 @@ namespace
std::map<String /* new_db_name */, CreateDatabaseInfo> databases;
std::map<DatabaseAndTableName /* new_table_name */, CreateTableInfo> tables;
};
}
RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const Elements & elements, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata)
{
try
RestoreTasks makeRestoreTasksImpl(
ContextMutablePtr context,
const BackupPtr & backup,
const Elements & elements,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
RestoreTasksBuilder builder{context, backup, restore_settings, restore_coordination, timeout_for_restoring_metadata};
builder.prepare(elements);
return builder.makeTasks();
}
void restoreMetadataImpl(RestoreTasks & restore_tasks)
{
/// There are two kinds of restore tasks: sequential and non-sequential ones.
/// Sequential tasks are executed first and always in one thread.
std::deque<std::unique_ptr<IRestoreTask>> restore_metadata_tasks;
boost::range::remove_erase_if(
restore_tasks,
[&restore_metadata_tasks](RestoreTaskPtr & task)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
{
restore_metadata_tasks.push_back(std::move(task));
return true;
}
return false;
});
/// Sequential tasks.
while (!restore_metadata_tasks.empty())
{
auto current_task = std::move(restore_metadata_tasks.front());
restore_metadata_tasks.pop_front();
RestoreTasks new_tasks = current_task->run();
for (auto & task : new_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
restore_metadata_tasks.push_back(std::move(task));
else
restore_tasks.push_back(std::move(task));
}
}
}
}
RestoreTasks makeRestoreTasks(
ContextMutablePtr context,
const BackupPtr & backup,
const Elements & elements,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
try
{
return makeRestoreTasksImpl(context, backup, elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
}
catch (...)
{
restore_coordination->finishRestoringMetadata(restore_settings.host_id, getCurrentExceptionMessage(false));
@ -818,40 +849,15 @@ RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backu
}
void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
void restoreMetadata(
RestoreTasks & restore_tasks,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
std::deque<std::unique_ptr<IRestoreTask>> sequential_tasks;
std::deque<std::unique_ptr<IRestoreTask>> enqueued_tasks;
try
{
/// There are two kinds of restore tasks: sequential and non-sequential ones.
/// Sequential tasks are executed first and always in one thread.
for (auto & task : restore_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
sequential_tasks.push_back(std::move(task));
else
enqueued_tasks.push_back(std::move(task));
}
/// Sequential tasks.
while (!sequential_tasks.empty())
{
auto current_task = std::move(sequential_tasks.front());
sequential_tasks.pop_front();
RestoreTasks new_tasks = current_task->run();
for (auto & task : new_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
sequential_tasks.push_back(std::move(task));
else
enqueued_tasks.push_back(std::move(task));
}
}
restoreMetadataImpl(restore_tasks);
}
catch (...)
{
@ -863,13 +869,18 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
/// We need this wait because we're going to call some functions which require data collected from other nodes too,
/// see IRestoreCoordination::checkTablesNotExistedInReplicatedDBs(), IRestoreCoordination::getReplicatedTableDataPath().
restore_coordination->finishRestoringMetadata(restore_settings.host_id);
if (!restore_settings.host_id.empty())
{
restore_coordination->waitForAllHostsToRestoreMetadata(
BackupSettings::Util::filterHostIDs(
restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num),
timeout_for_restoring_metadata);
}
restore_coordination->waitForAllHostsRestoredMetadata(
BackupSettings::Util::filterHostIDs(
restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num),
timeout_for_restoring_metadata);
}
void restoreData(RestoreTasks & restore_tasks, ThreadPool & thread_pool)
{
std::deque<std::unique_ptr<IRestoreTask>> tasks(std::make_move_iterator(restore_tasks.begin()), std::make_move_iterator(restore_tasks.end()));
restore_tasks.clear();
/// Non-sequential tasks.
size_t num_active_jobs = 0;
@ -882,15 +893,15 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
std::unique_ptr<IRestoreTask> current_task;
{
std::unique_lock lock{mutex};
event.wait(lock, [&] { return !enqueued_tasks.empty() || exception || !num_active_jobs; });
if ((enqueued_tasks.empty() && !num_active_jobs) || exception)
event.wait(lock, [&] { return !tasks.empty() || exception || !num_active_jobs; });
if ((tasks.empty() && !num_active_jobs) || exception)
break;
current_task = std::move(enqueued_tasks.front());
enqueued_tasks.pop_front();
current_task = std::move(tasks.front());
tasks.pop_front();
++num_active_jobs;
}
auto job = [current_task = std::shared_ptr<IRestoreTask>(std::move(current_task)), &enqueued_tasks, &num_active_jobs, &exception, &mutex, &event]() mutable
auto job = [current_task = std::shared_ptr<IRestoreTask>(std::move(current_task)), &tasks, &num_active_jobs, &exception, &mutex, &event]() mutable
{
SCOPE_EXIT({
--num_active_jobs;
@ -917,8 +928,7 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
{
std::lock_guard lock{mutex};
enqueued_tasks.insert(
enqueued_tasks.end(), std::make_move_iterator(new_tasks.begin()), std::make_move_iterator(new_tasks.end()));
tasks.insert(tasks.end(), std::make_move_iterator(new_tasks.begin()), std::make_move_iterator(new_tasks.end()));
}
};
@ -935,4 +945,64 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
std::rethrow_exception(exception);
}
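The scheduling loop above (condition variable, active-job counter, tasks that may spawn more tasks) is dense, so here is a condensed, runnable sketch of the same shape, with plain std::thread and integers standing in for IRestoreTask; a task n spawns a follow-up task n-1 (illustrative only, not ClickHouse's ThreadPool):

#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main()
{
    std::deque<int> tasks{3};
    std::mutex mutex;
    std::condition_variable event;
    size_t num_active_jobs = 0;
    std::vector<std::thread> pool;

    while (true)
    {
        int current_task;
        {
            std::unique_lock lock{mutex};
            event.wait(lock, [&] { return !tasks.empty() || !num_active_jobs; });
            if (tasks.empty() && !num_active_jobs)
                break; /// queue drained and nothing in flight
            current_task = tasks.front();
            tasks.pop_front();
            ++num_active_jobs;
        }

        pool.emplace_back([current_task, &tasks, &num_active_jobs, &mutex, &event]
        {
            /// "Run" the task; like a restore task, it may enqueue new tasks.
            std::lock_guard lock{mutex};
            if (current_task > 0)
                tasks.push_back(current_task - 1);
            --num_active_jobs;
            event.notify_all();
        });
    }

    for (auto & thread : pool)
        thread.join();
    std::cout << "all tasks done\n";
}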
/// Returns access required to execute RESTORE query.
AccessRightsElements getRequiredAccessToRestore(const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings)
{
AccessRightsElements required_access;
for (const auto & element : elements)
{
switch (element.type)
{
case ASTBackupQuery::TABLE:
{
if (element.is_temp_db)
{
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE);
break;
}
AccessFlags flags = AccessType::SHOW_TABLES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags, element.new_name.first, element.new_name.second);
break;
}
case ASTBackupQuery::DATABASE:
{
if (element.is_temp_db)
{
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE);
break;
}
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (restore_settings.create_database != RestoreDatabaseCreationMode::kMustExist)
flags |= AccessType::CREATE_DATABASE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags, element.new_name.first);
break;
}
case ASTBackupQuery::ALL_DATABASES:
{
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (restore_settings.create_database != RestoreDatabaseCreationMode::kMustExist)
flags |= AccessType::CREATE_DATABASE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags);
break;
}
}
}
return required_access;
}
}

View File

@ -14,6 +14,7 @@ using RestoreTaskPtr = std::unique_ptr<IRestoreTask>;
using RestoreTasks = std::vector<RestoreTaskPtr>;
struct RestoreSettings;
class IRestoreCoordination;
class AccessRightsElements;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
using ContextMutablePtr = std::shared_ptr<Context>;
@ -22,6 +23,16 @@ using ContextMutablePtr = std::shared_ptr<Context>;
RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata);
/// Executes restore tasks.
void executeRestoreTasks(RestoreTasks && tasks, ThreadPool & thread_pool, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata);
void restoreMetadata(
RestoreTasks & restore_tasks,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata);
void restoreData(RestoreTasks & restore_tasks, ThreadPool & thread_pool);
/// Returns access required to execute RESTORE query.
AccessRightsElements getRequiredAccessToRestore(const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings);
}
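With executeRestoreTasks() gone, the header now exposes a three-step flow. A hedged sketch of how a caller (for instance the RESTORE interpreter) is presumably expected to stitch these together; the include path, the wrapper name runRestore, and the checkAccess() call are assumptions, not shown in this diff:

#include <Backups/RestoreUtils.h> /// assumed header path for the declarations above

namespace DB
{

/// Hypothetical wrapper, for illustration only.
void runRestore(
    ContextMutablePtr context,
    const BackupPtr & backup,
    const ASTBackupQuery::Elements & elements,
    const RestoreSettings & restore_settings,
    const std::shared_ptr<IRestoreCoordination> & restore_coordination,
    ThreadPool & thread_pool,
    std::chrono::seconds timeout_for_restoring_metadata)
{
    /// 1. Verify the caller holds every right the RESTORE query needs.
    context->checkAccess(getRequiredAccessToRestore(elements, restore_settings));

    /// 2. Build the task list from the backup contents.
    RestoreTasks tasks = makeRestoreTasks(
        context, backup, elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);

    /// 3. Metadata first (sequential, coordinated across hosts), then data (thread pool).
    restoreMetadata(tasks, restore_settings, restore_coordination, timeout_for_restoring_metadata);
    restoreData(tasks, thread_pool);
}

}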

View File

@ -180,7 +180,7 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
writer = std::make_shared<BackupWriterFile>(path);
else
writer = std::make_shared<BackupWriterDisk>(disk, path);
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, writer, params.context, params.backup_uuid, params.is_internal_backup, params.coordination_zk_path);
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, writer, params.context, params.backup_uuid, params.is_internal_backup, params.backup_coordination);
}
};

View File

@ -19,7 +19,7 @@
/** This file was edited for ClickHouse.
*/
#include <string.h>
#include <cstring>
#include <Common/Elf.h>
#include <Common/Dwarf.h>

View File

@ -4,7 +4,7 @@
#include <Common/Exception.h>
#include <base/unaligned.h>
#include <string.h>
#include <cstring>
namespace DB

View File

@ -623,7 +623,7 @@
M(652, ONLY_NULLS_WHILE_READING_SCHEMA) \
M(653, CANNOT_PARSE_BACKUP_SETTINGS) \
M(654, WRONG_BACKUP_SETTINGS) \
M(655, FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE) \
M(655, FAILED_TO_SYNC_BACKUP_OR_RESTORE) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

View File

@ -1,6 +1,6 @@
#include "Exception.h"
#include <string.h>
#include <cstring>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>

View File

@ -1,7 +1,7 @@
#include <Common/IO.h>
#include <unistd.h>
#include <errno.h>
#include <cerrno>
#include <cstring>
bool writeRetry(int fd, const char * data, size_t size)

View File

@ -3,7 +3,7 @@
#include <fcntl.h>
#include <dlfcn.h>
#include <unistd.h>
#include <time.h>
#include <ctime>
#include <csignal>
#include <Common/logger_useful.h>

View File

@ -2,7 +2,7 @@
#include <sys/file.h>
#include <fcntl.h>
#include <errno.h>
#include <cerrno>
#include <Common/logger_useful.h>
#include <base/errnoToString.h>

View File

@ -93,6 +93,13 @@ String toString(TargetArch arch);
#define USE_MULTITARGET_CODE 1
#if defined(__clang__)
#define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f")))
#define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2")))
#define AVX_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx")))
#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt")))
#define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE
# define BEGIN_AVX512F_SPECIFIC_CODE \
_Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f\"))),apply_to=function)")
# define BEGIN_AVX2_SPECIFIC_CODE \
@ -109,6 +116,13 @@ String toString(TargetArch arch);
*/
# define DUMMY_FUNCTION_DEFINITION [[maybe_unused]] void _dummy_function_definition();
#else
#define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native")))
#define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native")))
#define AVX_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,tune=native")))
#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,tune=native")))
#define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE
# define BEGIN_AVX512F_SPECIFIC_CODE \
_Pragma("GCC push_options") \
_Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native\")")
@ -212,4 +226,74 @@ DECLARE_AVX512F_SPECIFIC_CODE(
constexpr auto BuildArch = TargetArch::AVX512F; /// NOLINT
) // DECLARE_AVX512F_SPECIFIC_CODE
/** Runtime Dispatch helpers for class members.
*
* Example of usage:
*
* class TestClass
* {
* public:
* MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(testFunctionImpl,
* MULTITARGET_FH(int), /\*testFunction*\/ MULTITARGET_FB((int value)
* {
* return value;
* })
* )
*
* void testFunction(int value) {
* if (isArchSupported(TargetArch::AVX2))
* {
* testFunctionImplAVX2(value);
* }
* else if (isArchSupported(TargetArch::SSE42))
* {
* testFunctionImplSSE42(value);
* }
* else
* {
* testFunctionImpl(value);
* }
* }
*};
*
*/
/// Function header
#define MULTITARGET_FH(...) __VA_ARGS__
/// Function body
#define MULTITARGET_FB(...) __VA_ARGS__
#if ENABLE_MULTITARGET_CODE && defined(__GNUC__) && defined(__x86_64__)
/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(name, FUNCTION_HEADER, FUNCTION_BODY) \
FUNCTION_HEADER \
\
AVX2_FUNCTION_SPECIFIC_ATTRIBUTE \
name##AVX2 \
FUNCTION_BODY \
\
FUNCTION_HEADER \
\
SSE42_FUNCTION_SPECIFIC_ATTRIBUTE \
name##SSE42 \
FUNCTION_BODY \
\
FUNCTION_HEADER \
\
name \
FUNCTION_BODY
#else
/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(name, FUNCTION_HEADER, FUNCTION_BODY) \
FUNCTION_HEADER \
\
name \
FUNCTION_BODY
#endif
}
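For the free-function counterpart of the wrapper macro above, the DECLARE_*_SPECIFIC_CODE blocks earlier in this header compile one body per instruction set into per-arch namespaces (TargetSpecific::Default, TargetSpecific::AVX2, and so on, as hinted by the DECLARE_AVX512F_SPECIFIC_CODE excerpt). A hedged usage sketch, assuming the code sits inside namespace DB and that DECLARE_DEFAULT_CODE / DECLARE_AVX2_SPECIFIC_CODE counterparts of that block exist:

DECLARE_DEFAULT_CODE(
    int sumInts(const int * data, size_t size) /// compiled for the baseline target
    {
        int sum = 0;
        for (size_t i = 0; i < size; ++i)
            sum += data[i];
        return sum;
    }
)

DECLARE_AVX2_SPECIFIC_CODE(
    int sumInts(const int * data, size_t size) /// same body, auto-vectorized for AVX2
    {
        int sum = 0;
        for (size_t i = 0; i < size; ++i)
            sum += data[i];
        return sum;
    }
)

int sumInts(const int * data, size_t size)
{
#if USE_MULTITARGET_CODE
    if (isArchSupported(TargetArch::AVX2))
        return TargetSpecific::AVX2::sumInts(data, size);
#endif
    return TargetSpecific::Default::sumInts(data, size);
}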

View File

@ -9,10 +9,10 @@
#include "hasLinuxCapability.h"
#include <base/unaligned.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/socket.h>
#include <linux/genetlink.h>
#include <linux/netlink.h>

View File

@ -1,4 +1,4 @@
#include <signal.h>
#include <csignal>
#include <sys/time.h>
#if defined(OS_LINUX)
# include <sys/sysinfo.h>

View File

@ -1,4 +1,4 @@
#include <string.h>
#include <cstring>
#include <string_view>
#include <Common/clearPasswordFromCommandLine.h>

View File

@ -1,6 +1,6 @@
#include <Common/createHardLink.h>
#include <Common/Exception.h>
#include <errno.h>
#include <cerrno>
#include <unistd.h>
#include <sys/stat.h>

View File

@ -1,5 +1,5 @@
#if defined(OS_LINUX)
#include <stdlib.h>
#include <cstdlib>
/// Interposing these symbols explicitly. The idea works like this: malloc.cpp compiles to a
/// dedicated object (namely clickhouse_malloc.o), and it will show earlier in the link command

View File

@ -1,4 +1,4 @@
#include <time.h>
#include <ctime>
#include <unistd.h>
#include <sys/types.h>
#include <Common/Exception.h>

View File

@ -1,5 +1,5 @@
#include <city.h>
#include <string.h>
#include <cstring>
#include <base/unaligned.h>
#include <base/types.h>

View File

@ -13,7 +13,7 @@
#include <IO/BitHelpers.h>
#include <IO/WriteHelpers.h>
#include <string.h>
#include <cstring>
#include <algorithm>
#include <cstdlib>
#include <type_traits>

View File

@ -11,7 +11,7 @@
#include <IO/ReadBufferFromMemory.h>
#include <IO/BitHelpers.h>
#include <string.h>
#include <cstring>
#include <algorithm>
#include <type_traits>

View File

@ -1,6 +1,6 @@
#include "LZ4_decompress_faster.h"
#include <string.h>
#include <cstring>
#include <iostream>
#include <Core/Defines.h>
#include <Common/Stopwatch.h>

View File

@ -22,7 +22,7 @@
#include <typeinfo>
#include <vector>
#include <string.h>
#include <cstring>
/// For the expansion of gtest macros.
#if defined(__clang__)

View File

@ -4,7 +4,7 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnMap.h>
#include <Common/typeid_cast.h>
#include <string.h>
#include <cstring>
#include <boost/program_options/options_description.hpp>
namespace DB

View File

@ -590,6 +590,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
M(Bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs, enabled by default", 0) \
M(UInt64, hive_max_query_partitions, 120, "Limit the max number of partitions that can be queried for each hive table, for safety.", 0) \
M(Bool, compatibility_ignore_auto_increment_in_create_table, false, "Ignore AUTO_INCREMENT keyword in column declaration if true, otherwise return error. It simplifies migration from MySQL", 0) \
// End of COMMON_SETTINGS
// Please add settings related to formats into the FORMAT_FACTORY_SETTINGS and move obsolete settings to OBSOLETE_SETTINGS.
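The settings table above is an X-macro list: each M(type, name, default, description, flags) row is expanded several times, once per consumer (struct members, documentation, serialization). A minimal self-contained sketch of the pattern, simplified to four macro arguments with illustrative names, not the real ClickHouse Settings machinery:

#include <iostream>

#define APPLY_FOR_MY_SETTINGS(M) \
    M(bool, throw_if_no_data_to_insert, true, "Enables or disables empty INSERTs") \
    M(unsigned, hive_max_query_partitions, 120, "Limit the number of partitions queried per hive table")

struct MySettings
{
/// First expansion: declare one member per row.
#define DECLARE_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
    APPLY_FOR_MY_SETTINGS(DECLARE_SETTING)
#undef DECLARE_SETTING
};

int main()
{
    MySettings settings;
/// Second expansion: print every setting with its documentation.
#define PRINT_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \
    std::cout << #NAME << " = " << settings.NAME << "  /// " << DESCRIPTION << '\n';
    APPLY_FOR_MY_SETTINGS(PRINT_SETTING)
#undef PRINT_SETTING
}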

View File

@ -13,9 +13,9 @@
#if defined(__linux__)
#include <sys/prctl.h>
#endif
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <cerrno>
#include <cstring>
#include <csignal>
#include <unistd.h>
#include <typeinfo>

View File

@ -21,7 +21,7 @@
#if USE_SENTRY && !defined(KEEPER_STANDALONE_BUILD)
# include <sentry.h>
# include <stdio.h>
# include <cstdio>
# include <filesystem>
namespace fs = std::filesystem;

View File

@ -1,4 +1,4 @@
#include <string.h>
#include <cstring>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>

View File

@ -627,27 +627,27 @@ bool DiskLocal::setup()
/// Try to create a new checker file. The disk status can be either broken or readonly.
if (disk_checker_magic_number == -1)
try
{
pcg32_fast rng(randomSeed());
UInt32 magic_number = rng();
try
{
auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {});
writeIntBinary(magic_number, *buf);
pcg32_fast rng(randomSeed());
UInt32 magic_number = rng();
{
auto buf = writeFile(disk_checker_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite, {});
writeIntBinary(magic_number, *buf);
}
disk_checker_magic_number = magic_number;
}
catch (...)
{
LOG_WARNING(
logger,
"Cannot create/write to {0}. Disk {1} is either readonly or broken. Without setting up disk checker file, DiskLocalCheckThread "
"will not be started. Disk is assumed to be RW. Try manually fix the disk and do `SYSTEM RESTART DISK {1}`",
disk_checker_path,
name);
disk_checker_can_check_read = false;
return true;
}
disk_checker_magic_number = magic_number;
}
catch (...)
{
LOG_WARNING(
logger,
"Cannot create/write to {0}. Disk {1} is either readonly or broken. Without setting up disk checker file, DiskLocalCheckThread "
"will not be started. Disk is assumed to be RW. Try manually fix the disk and do `SYSTEM RESTART DISK {1}`",
disk_checker_path,
name);
disk_checker_can_check_read = false;
return true;
}
if (disk_checker_magic_number == -1)
throw Exception("disk_checker_magic_number is not initialized. It's a bug", ErrorCodes::LOGICAL_ERROR);

View File

@ -126,7 +126,7 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getCacheReadBuffer(size_t of
local_read_settings.local_fs_method = LocalFSReadMethod::pread;
auto buf = createReadBufferFromFileBase(path, local_read_settings);
auto from_fd = dynamic_cast<ReadBufferFromFileDescriptor*>(buf.get());
auto * from_fd = dynamic_cast<ReadBufferFromFileDescriptor*>(buf.get());
if (from_fd && from_fd->size() == 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to read from an empty cache file: {}", path);

View File

@ -37,7 +37,7 @@ capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaIn
try
{
int fd;
KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY));
KJ_SYSCALL(fd = open(schema_info.schemaDirectory().data(), O_RDONLY)); // NOLINT(bugprone-suspicious-semicolon)
auto schema_dir = kj::newDiskDirectory(kj::OsFileHandle(fd));
schema = impl.parseFromDirectory(*schema_dir, kj::Path::parse(schema_info.schemaPath()), {});
}

View File

@ -24,7 +24,7 @@
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma GCC diagnostic ignored "-Wunused-macros"
#include <string.h>
#include <cstring>
#define TOTAL_KEYWORDS 4942
#define MIN_WORD_LENGTH 4

View File

@ -700,8 +700,8 @@ ColumnPtr FunctionArrayElement::executeArgument(
|| (res = executeConst<IndexType>(arguments, result_type, index_data, builder, input_rows_count))
|| (res = executeString<IndexType>(arguments, index_data, builder))
|| (res = executeGeneric<IndexType>(arguments, index_data, builder))))
throw Exception("Illegal column " + arguments[0].column->getName()
+ " of first argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN);
throw Exception("Illegal column " + arguments[0].column->getName()
+ " of first argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN);
return res;
}
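The `||` chain above tries each specialized executor in turn and keeps the first non-null result, relying on left-to-right evaluation of the assignments. A generic runnable sketch of the idiom, with illustrative handler names and std types in place of ClickHouse columns:

#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

using ResultPtr = std::shared_ptr<std::string>;

/// Each handler returns nullptr when it cannot process the input.
ResultPtr tryNumeric(int x) { return x < 10 ? std::make_shared<std::string>("numeric") : nullptr; }
ResultPtr tryString(int x) { return x < 100 ? std::make_shared<std::string>("string") : nullptr; }
ResultPtr tryGeneric(int x) { return x < 1000 ? std::make_shared<std::string>("generic") : nullptr; }

ResultPtr execute(int x)
{
    ResultPtr res;
    /// The assignments run left to right and the chain stops at the first
    /// handler returning a non-null pointer, as in executeArgument() above.
    if (!((res = tryNumeric(x)) || (res = tryString(x)) || (res = tryGeneric(x))))
        throw std::runtime_error("no handler accepted the input");
    return res;
}

int main()
{
    std::cout << *execute(5) << ' ' << *execute(50) << ' ' << *execute(500) << '\n';
}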

View File

@ -3,7 +3,7 @@
#if USE_H3
#include <array>
#include <math.h>
#include <cmath>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>

View File

@ -3,7 +3,7 @@
#if USE_H3
#include <array>
#include <math.h>
#include <cmath>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnsNumber.h>

View File

@ -7,7 +7,7 @@
#include <Functions/extractTimeZoneFromFunctionArguments.h>
#include <time.h>
#include <ctime>
namespace DB

View File

@ -8,7 +8,7 @@
#include <Common/assert_cast.h>
#include <time.h>
#include <ctime>
namespace DB

View File

@ -151,21 +151,21 @@ private:
}
return dst;
}
else if (const_cnt == 2)
{
const auto * col_const_x = assert_cast<const ColumnConst *> (col_x);
const auto * col_const_y = assert_cast<const ColumnConst *> (col_y);
size_t start_index = 0;
UInt8 res = isPointInEllipses(col_const_x->getValue<Float64>(), col_const_y->getValue<Float64>(), ellipses.data(), ellipses_count, start_index);
return DataTypeUInt8().createColumnConst(size, res);
}
else
{
throw Exception(
"Illegal types " + col_x->getName() + ", " + col_y->getName() + " of arguments 1, 2 of function " + getName() + ". Both must be either const or vector",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}
}
else if (const_cnt == 2)
{
const auto * col_const_x = assert_cast<const ColumnConst *> (col_x);
const auto * col_const_y = assert_cast<const ColumnConst *> (col_y);
size_t start_index = 0;
UInt8 res = isPointInEllipses(col_const_x->getValue<Float64>(), col_const_y->getValue<Float64>(), ellipses.data(), ellipses_count, start_index);
return DataTypeUInt8().createColumnConst(size, res);
}
else
{
throw Exception(
"Illegal types " + col_x->getName() + ", " + col_y->getName() + " of arguments 1, 2 of function " + getName() + ". Both must be either const or vector",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
}
}
static bool isPointInEllipses(Float64 x, Float64 y, const Ellipse * ellipses, size_t ellipses_count, size_t & start_index)

View File

@ -3,7 +3,7 @@
#include <IO/AsynchronousReadBufferFromFile.h>
#include <IO/WriteHelpers.h>
#include <Common/ProfileEvents.h>
#include <errno.h>
#include <cerrno>
namespace ProfileEvents

View File

@ -1,5 +1,5 @@
#include <errno.h>
#include <time.h>
#include <cerrno>
#include <ctime>
#include <optional>
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>

Some files were not shown because too many files have changed in this diff.