Merge branch 'master' into wv-engine

Authored by mergify[bot] on 2022-05-17 02:24:18 +00:00; committed by GitHub
commit 0d0fc23170
191 changed files with 4796 additions and 2085 deletions

File: .clang-tidy (clang-tidy configuration)

@ -5,38 +5,21 @@ Checks: '*,
-android-*,
-bugprone-assert-side-effect,
-bugprone-branch-clone,
-bugprone-dynamic-static-initializers,
-bugprone-easily-swappable-parameters,
-bugprone-exception-escape,
-bugprone-forwarding-reference-overload,
-bugprone-implicit-widening-of-multiplication-result,
-bugprone-lambda-function-name,
-bugprone-misplaced-widening-cast,
-bugprone-narrowing-conversions,
-bugprone-no-escape,
-bugprone-not-null-terminated-result,
-bugprone-signal-handler,
-bugprone-spuriously-wake-up-functions,
-bugprone-suspicious-semicolon,
-bugprone-unhandled-exception-at-new,
-bugprone-unhandled-self-assignment,
-cert-dcl16-c,
-cert-dcl37-c,
-cert-dcl51-cpp,
-cert-dcl58-cpp,
-cert-err58-cpp,
-cert-err60-cpp,
-cert-msc32-c,
-cert-msc51-cpp,
-cert-oop54-cpp,
-cert-oop57-cpp,
-cert-oop58-cpp,
-clang-analyzer-core.DynamicTypePropagation,
-clang-analyzer-core.uninitialized.CapturedBlockVariable,
-clang-analyzer-optin.performance.Padding,
-clang-analyzer-optin.portability.UnixAPI,
@ -53,7 +36,6 @@ Checks: '*,
-fuchsia-*,
-google-build-using-namespace,
-google-global-names-in-headers,
-google-readability-braces-around-statements,
-google-readability-function-size,
-google-readability-namespace-comments,
@ -63,7 +45,6 @@ Checks: '*,
-hicpp-avoid-c-arrays,
-hicpp-avoid-goto,
-hicpp-braces-around-statements,
-hicpp-deprecated-headers,
-hicpp-explicit-conversions,
-hicpp-function-size,
-hicpp-invalid-access-moved,
@ -79,7 +60,6 @@ Checks: '*,
-hicpp-uppercase-literal-suffix,
-hicpp-use-auto,
-hicpp-use-emplace,
-hicpp-use-equals-default,
-hicpp-use-noexcept,
-hicpp-use-override,
-hicpp-vararg,
@ -90,40 +70,27 @@ Checks: '*,
-openmp-*,
-misc-definitions-in-headers,
-misc-new-delete-overloads,
-misc-no-recursion,
-misc-non-copyable-objects,
-misc-non-private-member-variables-in-classes,
-misc-static-assert,
-modernize-avoid-c-arrays,
-modernize-concat-nested-namespaces,
-modernize-deprecated-headers,
-modernize-deprecated-ios-base-aliases,
-modernize-pass-by-value,
-modernize-replace-auto-ptr,
-modernize-replace-disallow-copy-and-assign-macro,
-modernize-return-braced-init-list,
-modernize-unary-static-assert,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-emplace,
-modernize-use-equals-default,
-modernize-use-nodiscard,
-modernize-use-noexcept,
-modernize-use-override,
-modernize-use-trailing-return-type,
-performance-inefficient-string-concatenation,
-performance-no-int-to-ptr,
-performance-type-promotion-in-math-fn,
-performance-trivially-destructible,
-performance-unnecessary-value-param,
-portability-simd-intrinsics,
-readability-convert-member-functions-to-static,
-readability-braces-around-statements,
-readability-else-after-return,
-readability-function-cognitive-complexity,
@ -131,9 +98,7 @@ Checks: '*,
-readability-implicit-bool-conversion,
-readability-isolate-declaration,
-readability-magic-numbers,
-readability-misleading-indentation,
-readability-named-parameter,
-readability-qualified-auto,
-readability-redundant-declaration,
-readability-static-accessed-through-instance,
-readability-suspicious-call-argument,
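
The hunks above edit the repository's clang-tidy configuration. In that format, Checks: '*, ...' enables every check up front and each subsequent "-name" entry opts one back out, so the deletions here (38 lines shrinking to 21 in the first hunk, with similar cuts below) re-enable the checks whose opt-out entries were removed. A minimal sketch of the same selection syntax on the clang-tidy command line; the file name and check choices are illustrative, not taken from this commit:

    # Enable all checks, then opt out of two groups; deleting an opt-out
    # entry (as this commit does in .clang-tidy) turns that check back on.
    clang-tidy --checks='*,-android-*,-fuchsia-*' --list-checks
    # Inverse selection: disable everything, then run only the bugprone-* checks.
    clang-tidy --checks='-*,bugprone-*' src/example.cpp -- -std=c++17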

File: GitHub Actions workflow

@ -38,6 +38,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
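
In every workflow hunk like the one above, the two quoted docker lines are the old version and the four lines around "# shellcheck disable=SC2046" are their replacement (the hunk counts, 6 lines becoming 8, confirm the pairing; the capture dropped the diff's -/+ markers). Quoted, the command substitution $(docker ps -q) expands to a single argument, so when several containers are running their IDs reach docker kill as one newline-joined string that matches no container; unquoted, word splitting hands each ID over separately, which is presumably why ShellCheck's SC2046 warning against unquoted expansion is suppressed deliberately. The trailing ||: (shorthand for "|| true") keeps the step from failing when there is nothing to kill. A small sketch of the difference, with made-up IDs rather than anything from this commit:

    ids='aaa
    bbb'                    # stand-in for the multi-line output of: docker ps -q
    docker kill "$ids" ||:  # old form: one argument "aaa<newline>bbb", no such container
    # shellcheck disable=SC2046
    docker kill $ids ||:    # new form: two arguments, aaa and bbb; splitting is intentional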

File: GitHub Actions workflow

@ -112,8 +112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -159,8 +161,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -203,8 +207,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -247,8 +253,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -291,8 +299,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -335,8 +345,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -380,8 +392,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -418,8 +432,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -456,8 +472,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -497,8 +515,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -534,8 +554,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

File: GitHub Actions workflow

@ -122,8 +122,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
DocsCheck:
needs: DockerHubPush
@ -153,8 +155,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

File: GitHub Actions workflow

@ -116,6 +116,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

File: GitHub Actions workflow

@ -39,6 +39,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

File: GitHub Actions workflow

@ -112,8 +112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
CompatibilityCheck:
needs: [BuilderDebRelease]
@ -144,8 +146,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
SplitBuildSmokeTest:
needs: [BuilderDebSplitted]
@ -176,8 +180,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -225,8 +231,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -267,8 +275,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderPerformance:
needs: DockerHubPush
@ -313,8 +323,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinRelease:
needs: [DockerHubPush]
@ -359,8 +371,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
# BuilderBinGCC:
# needs: [DockerHubPush]
@ -403,8 +417,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -447,8 +463,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush]
@ -491,8 +509,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -535,8 +555,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush]
@ -579,8 +601,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -623,8 +647,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
##########################################################################################
##################################### SPECIAL BUILDS #####################################
@ -670,8 +696,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinTidy:
needs: [DockerHubPush]
@ -714,8 +742,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush]
@ -760,8 +790,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64:
needs: [DockerHubPush]
@ -806,8 +838,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinFreeBSD:
needs: [DockerHubPush]
@ -852,8 +886,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush]
@ -898,8 +934,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinPPC64:
needs: [DockerHubPush]
@ -944,8 +982,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
@ -972,8 +1012,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -1021,8 +1063,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderSpecialReport:
needs:
@ -1066,8 +1110,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -1104,8 +1150,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseOrdinary:
needs: [BuilderDebRelease]
@ -1139,8 +1187,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
@ -1174,8 +1224,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -1209,8 +1261,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -1246,8 +1300,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -1283,8 +1339,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -1320,8 +1378,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -1357,8 +1417,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -1394,8 +1456,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -1429,8 +1493,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -1466,8 +1532,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -1503,8 +1571,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -1540,8 +1610,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -1577,8 +1649,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -1614,8 +1688,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -1651,8 +1727,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1689,8 +1767,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestReleaseDatabaseOrdinary:
needs: [BuilderDebRelease]
@ -1724,8 +1804,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1759,8 +1841,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1794,8 +1878,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1829,8 +1915,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -1864,8 +1952,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -1899,8 +1989,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -1934,8 +2026,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -1971,8 +2065,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -2009,8 +2105,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -2043,8 +2141,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -2077,8 +2177,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -2111,8 +2213,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -2150,8 +2254,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -2186,8 +2292,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -2222,8 +2330,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -2258,8 +2368,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -2294,8 +2406,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -2330,8 +2444,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -2366,8 +2482,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -2402,8 +2520,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -2438,8 +2558,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### AST FUZZERS ############################################
@ -2475,8 +2597,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestTsan:
needs: [BuilderDebTsan]
@ -2509,8 +2633,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestUBSan:
needs: [BuilderDebUBsan]
@ -2543,8 +2669,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestMSan:
needs: [BuilderDebMsan]
@ -2577,8 +2705,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestDebug:
needs: [BuilderDebDebug]
@ -2611,8 +2741,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### UNIT TESTS #############################################
@ -2648,8 +2780,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsReleaseClang:
needs: [BuilderBinRelease]
@ -2682,8 +2816,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
# UnitTestsReleaseGCC:
# needs: [BuilderBinGCC]
@ -2716,8 +2852,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH"
UnitTestsTsan:
needs: [BuilderDebTsan]
@ -2750,8 +2888,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsMsan:
needs: [BuilderDebMsan]
@ -2784,8 +2924,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsUBsan:
needs: [BuilderDebUBsan]
@ -2818,8 +2960,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### PERFORMANCE TESTS ######################################
@ -2857,8 +3001,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison1:
needs: [BuilderPerformance]
@ -2893,8 +3039,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison2:
needs: [BuilderPerformance]
@ -2929,8 +3077,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison3:
needs: [BuilderPerformance]
@ -2965,8 +3115,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

File: GitHub Actions workflow

@ -118,6 +118,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"

File: GitHub Actions workflow

@ -137,8 +137,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FastTest:
needs: DockerHubPush
@ -171,8 +173,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
CompatibilityCheck:
needs: [BuilderDebRelease]
@ -203,8 +207,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
SplitBuildSmokeTest:
needs: [BuilderDebSplitted]
@ -235,8 +241,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -282,8 +290,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderPerformance:
needs: [DockerHubPush, FastTest]
@ -328,8 +338,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinRelease:
needs: [DockerHubPush, FastTest]
@ -372,8 +384,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
# BuilderBinGCC:
# needs: [DockerHubPush, FastTest]
@ -416,8 +430,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush, FastTest]
@ -460,8 +476,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush, FastTest]
@ -504,8 +522,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush, FastTest]
@ -548,8 +568,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush, FastTest]
@ -592,8 +614,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush, FastTest]
@ -636,8 +660,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush, FastTest]
@ -680,8 +706,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
##########################################################################################
##################################### SPECIAL BUILDS #####################################
@ -727,8 +755,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinTidy:
needs: [DockerHubPush, FastTest]
@ -771,8 +801,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwin:
needs: [DockerHubPush, FastTest]
@ -815,8 +847,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinAarch64:
needs: [DockerHubPush, FastTest]
@ -859,8 +893,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinFreeBSD:
needs: [DockerHubPush, FastTest]
@ -903,8 +939,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinDarwinAarch64:
needs: [DockerHubPush, FastTest]
@ -947,8 +985,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinPPC64:
needs: [DockerHubPush, FastTest]
@ -991,8 +1031,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
@ -1019,8 +1061,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -1068,8 +1112,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
BuilderSpecialReport:
needs:
@ -1114,8 +1160,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -1152,8 +1200,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated0:
needs: [BuilderDebRelease]
@ -1189,8 +1239,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseDatabaseReplicated1:
needs: [BuilderDebRelease]
@ -1226,8 +1278,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseWideParts:
needs: [BuilderDebRelease]
@ -1261,8 +1315,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
@ -1296,8 +1352,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -1331,8 +1389,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -1368,8 +1428,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -1405,8 +1467,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -1442,8 +1506,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -1479,8 +1545,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -1516,8 +1584,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -1551,8 +1621,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -1588,8 +1660,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -1625,8 +1699,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -1662,8 +1738,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -1699,8 +1777,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -1736,8 +1816,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -1773,8 +1855,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestFlakyCheck:
needs: [BuilderDebAsan]
@ -1808,8 +1892,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
TestsBugfixCheck:
runs-on: [self-hosted, stress-tester]
@ -1853,8 +1939,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1891,8 +1979,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1926,8 +2016,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1961,8 +2053,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1996,8 +2090,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -2031,8 +2127,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -2066,8 +2164,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -2101,8 +2201,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -2138,8 +2240,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -2176,8 +2280,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -2210,8 +2316,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -2244,8 +2352,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -2278,8 +2388,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### AST FUZZERS ############################################
@ -2315,8 +2427,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestTsan:
needs: [BuilderDebTsan]
@ -2349,8 +2463,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestUBSan:
needs: [BuilderDebUBsan]
@ -2383,8 +2499,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestMSan:
needs: [BuilderDebMsan]
@ -2417,8 +2535,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
ASTFuzzerTestDebug:
needs: [BuilderDebDebug]
@ -2451,8 +2571,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -2490,8 +2612,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -2526,8 +2650,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -2562,8 +2688,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -2598,8 +2726,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -2634,8 +2764,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -2670,8 +2802,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -2706,8 +2840,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -2742,8 +2878,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -2778,8 +2916,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsFlakyCheck:
needs: [BuilderDebAsan]
@ -2812,8 +2952,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### UNIT TESTS #############################################
@ -2849,8 +2991,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsReleaseClang:
needs: [BuilderBinRelease]
@ -2883,8 +3027,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
# UnitTestsReleaseGCC:
# needs: [BuilderBinGCC]
@ -2917,8 +3063,10 @@ jobs:
# - name: Cleanup
# if: always()
# run: |
# docker kill "$(docker ps -q)" ||:
# docker rm -f "$(docker ps -a -q)" ||:
# # shellcheck disable=SC2046
# docker kill $(docker ps -q) ||:
# # shellcheck disable=SC2046
# docker rm -f $(docker ps -a -q) ||:
# sudo rm -fr "$TEMP_PATH"
UnitTestsTsan:
needs: [BuilderDebTsan]
@ -2951,8 +3099,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsMsan:
needs: [BuilderDebMsan]
@ -2985,8 +3135,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
UnitTestsUBsan:
needs: [BuilderDebUBsan]
@ -3019,8 +3171,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
#################################### PERFORMANCE TESTS ######################################
@ -3058,8 +3212,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison1:
needs: [BuilderPerformance]
@ -3094,8 +3250,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison2:
needs: [BuilderPerformance]
@ -3130,8 +3288,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
PerformanceComparison3:
needs: [BuilderPerformance]
@ -3166,8 +3326,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -58,6 +58,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -103,8 +103,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
@ -152,8 +154,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAarch64:
needs: [DockerHubPush]
@ -194,8 +198,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebAsan:
needs: [DockerHubPush]
@ -238,8 +244,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebUBsan:
needs: [DockerHubPush]
@ -282,8 +290,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebTsan:
needs: [DockerHubPush]
@ -326,8 +336,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebMsan:
needs: [DockerHubPush]
@ -370,8 +382,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderDebDebug:
needs: [DockerHubPush]
@ -414,8 +428,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
@ -462,8 +478,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
########################### FUNCTIONAL STATELESS TESTS #######################################
@ -500,8 +518,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
@ -535,8 +555,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan0:
needs: [BuilderDebAsan]
@ -572,8 +594,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAsan1:
needs: [BuilderDebAsan]
@ -609,8 +633,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan0:
needs: [BuilderDebTsan]
@ -646,8 +672,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan1:
needs: [BuilderDebTsan]
@ -683,8 +711,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestTsan2:
needs: [BuilderDebTsan]
@ -720,8 +750,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestUBsan:
needs: [BuilderDebUBsan]
@ -755,8 +787,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan0:
needs: [BuilderDebMsan]
@ -792,8 +826,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan1:
needs: [BuilderDebMsan]
@ -829,8 +865,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestMsan2:
needs: [BuilderDebMsan]
@ -866,8 +904,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug0:
needs: [BuilderDebDebug]
@ -903,8 +943,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug1:
needs: [BuilderDebDebug]
@ -940,8 +982,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestDebug2:
needs: [BuilderDebDebug]
@ -977,8 +1021,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
############################ FUNCTIONAL STATEFUL TESTS #######################################
@ -1015,8 +1061,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAarch64:
needs: [BuilderDebAarch64]
@ -1050,8 +1098,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestAsan:
needs: [BuilderDebAsan]
@ -1085,8 +1135,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestTsan:
needs: [BuilderDebTsan]
@ -1120,8 +1172,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestMsan:
needs: [BuilderDebMsan]
@ -1155,8 +1209,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestUBsan:
needs: [BuilderDebUBsan]
@ -1190,8 +1246,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatefulTestDebug:
needs: [BuilderDebDebug]
@ -1225,8 +1283,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
######################################### STRESS TESTS #######################################
@ -1262,8 +1322,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestTsan:
needs: [BuilderDebTsan]
@ -1300,8 +1362,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestMsan:
needs: [BuilderDebMsan]
@ -1334,8 +1398,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestUBsan:
needs: [BuilderDebUBsan]
@ -1368,8 +1434,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
StressTestDebug:
needs: [BuilderDebDebug]
@ -1402,8 +1470,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -1441,8 +1511,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan1:
needs: [BuilderDebAsan]
@ -1477,8 +1549,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsAsan2:
needs: [BuilderDebAsan]
@ -1513,8 +1587,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
@ -1549,8 +1625,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan1:
needs: [BuilderDebTsan]
@ -1585,8 +1663,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan2:
needs: [BuilderDebTsan]
@ -1621,8 +1701,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan3:
needs: [BuilderDebTsan]
@ -1657,8 +1739,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease0:
needs: [BuilderDebRelease]
@ -1693,8 +1777,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
IntegrationTestsRelease1:
needs: [BuilderDebRelease]
@ -1729,8 +1815,10 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
FinishCheck:
needs:

View File

@ -37,6 +37,8 @@ jobs:
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -1,32 +1,6 @@
cmake_minimum_required(VERSION 3.14)
foreach(policy
CMP0023
CMP0048 # CMake 3.0
CMP0074 # CMake 3.12
CMP0077
CMP0079
)
if(POLICY ${policy})
cmake_policy(SET ${policy} NEW)
endif()
endforeach()
# set default policy
foreach(default_policy_var_name
# make option() honor normal variables for BUILD_SHARED_LIBS:
# - re2
# - snappy
CMAKE_POLICY_DEFAULT_CMP0077
# Google Test from sources uses too old cmake, 2.6.x, and CMP0022 should
# set, to avoid using deprecated LINK_INTERFACE_LIBRARIES(_<CONFIG>)? over
# INTERFACE_LINK_LIBRARIES.
CMAKE_POLICY_DEFAULT_CMP0022
)
set(${default_policy_var_name} NEW)
endforeach()
project(ClickHouse)
project(ClickHouse LANGUAGES C CXX ASM)
# If turned off: e.g. when ENABLE_FOO is ON, but FOO tool was not found, CMake will continue.
option(FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION
@ -39,11 +13,10 @@ else()
set(RECONFIGURE_MESSAGE_LEVEL WARNING)
endif()
enable_language(C CXX ASM)
include (cmake/arch.cmake)
include (cmake/target.cmake)
include (cmake/tools.cmake)
include (cmake/ccache.cmake)
include (cmake/clang_tidy.cmake)
include (cmake/git_status.cmake)
@ -52,7 +25,6 @@ include (cmake/git_status.cmake)
macro (export)
endmacro ()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/")
set(CMAKE_EXPORT_COMPILE_COMMANDS 1) # Write compile_commands.json
set(CMAKE_LINK_DEPENDS_NO_SHARED 1) # Do not relink all depended targets on .so
set(CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE)
@ -67,8 +39,6 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/sysroot/README.md")
message (FATAL_ERROR "Submodules are not initialized. Run\n\tgit submodule update --init --recursive")
endif ()
include (cmake/ccache.cmake)
# Take care to add prlimit in the command line before ccache, or else ccache thinks that
# prlimit is the compiler, clang++ is its input file, and refuses to work with
# multiple inputs, e.g. in the ccache log:
@ -161,20 +131,22 @@ add_library(global-libs INTERFACE)
include (cmake/fuzzer.cmake)
include (cmake/sanitize.cmake)
if (CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DISABLE_COLORED_BUILD)
option(ENABLE_COLORED_BUILD "Enable colors in compiler output" ON)
set (CMAKE_COLOR_MAKEFILE ${ENABLE_COLORED_BUILD}) # works only for the makefile generator
if (ENABLE_COLORED_BUILD AND CMAKE_GENERATOR STREQUAL "Ninja")
# Turn on colored output. https://github.com/ninja-build/ninja/wiki/FAQ
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always")
# ... such manual setting of flags can be removed once CMake supports a variable to
# activate colors in *all* build systems: https://gitlab.kitware.com/cmake/cmake/-/issues/15502
endif ()
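The hard-coded generator check becomes a user-facing option, so colored output can now be toggled at configure time. A sketch, assuming the `ENABLE_COLORED_BUILD` option shown above:

```bash
# Disable colored diagnostics, e.g. when piping the build log to a file.
cmake -G Ninja -DENABLE_COLORED_BUILD=OFF -S . -B build
```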
include (cmake/check_flags.cmake)
include (cmake/add_warning.cmake)
set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wall") # -Werror and many more is also added inside cmake/warnings.cmake
if (COMPILER_CLANG)
# clang: warning: argument unused during compilation: '-specs=/usr/share/dpkg/no-pie-compile.specs' [-Wunused-command-line-argument]
set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wno-unused-command-line-argument")
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
@ -371,12 +343,11 @@ set (COMPILER_FLAGS "${COMPILER_FLAGS}")
# Our built-in unwinder only supports DWARF version up to 4.
set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
set (CMAKE_BUILD_COLOR_MAKEFILE ON)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_CXX_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}")
set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} -fno-inline ${CMAKE_C_FLAGS_ADD}")
@ -423,13 +394,6 @@ endif ()
# Turns on all external libs like s3, kafka, ODBC, ...
option(ENABLE_LIBRARIES "Enable all external libraries by default" ON)
if (NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
option(WERROR "Enable -Werror compiler option" OFF)
else ()
option(WERROR "Enable -Werror compiler option" ON)
endif ()
# Increase stack size on Musl. We need big stack for our recursive-descend parser.
if (USE_MUSL)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,stack-size=2097152")
@ -447,6 +411,13 @@ elseif (OS_FREEBSD)
endif ()
link_libraries(global-group)
if (NOT (OS_LINUX OR OS_DARWIN))
# Using system libs can cause a lot of warnings in includes (on macro expansion).
option(WERROR "Enable -Werror compiler option" OFF)
else ()
option(WERROR "Enable -Werror compiler option" ON)
endif ()
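Like any CMake option, the relocated `WERROR` switch remains overridable from the command line; for example, to keep building past new warnings:

```bash
# Illustrative only: turn off -Werror for this configure run.
cmake -DWERROR=OFF -S . -B build
```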
if (WERROR)
# Don't pollute CMAKE_CXX_FLAGS with -Werror as it will break some CMake checks.
# Instead, adopt modern cmake usage requirement.
@ -455,7 +426,7 @@ endif ()
# Make this extra-checks for correct library dependencies.
if (OS_LINUX AND NOT SANITIZE)
target_link_options(global-group INTERFACE "-Wl,--no-undefined")
target_link_options(global-group INTERFACE "LINKER:--no-undefined")
endif ()
######################################
@ -466,7 +437,7 @@ set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
if (USE_STATIC_LIBRARIES)
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT ARCH_ARM)
if (OS_LINUX AND NOT ARCH_AARCH64)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
@ -500,8 +471,7 @@ endif ()
message (STATUS
"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES}
CCACHE=${CCACHE_FOUND} ${CCACHE_VERSION}")
SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES}")
include (GNUInstallDirs)

View File

@ -1,5 +1,5 @@
#include <string>
#include <string.h>
#include <cstring>
#include <Poco/UTF8Encoding.h>
#include <Poco/NumberParser.h>
@ -12,7 +12,7 @@
#define JSON_MAX_DEPTH 100
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException")
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException") // NOLINT(cert-err60-cpp, modernize-use-noexcept)
/// Read an unsigned integer in a simple format from a non-null-terminated string.

View File

@ -5,7 +5,7 @@
#include <algorithm>
#include <cassert>
#include <string.h>
#include <cstring>
#include <unistd.h>
#include <sys/select.h>
#include <sys/time.h>

View File

@ -1,6 +1,6 @@
#include <base/demangle.h>
#include <stdlib.h>
#include <cstdlib>
#include <cxxabi.h>
static DemangleResult tryDemangle(const char * name, int & status)

View File

@ -3,7 +3,7 @@
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <errno.h>
#include <cerrno>
void * mremap_fallback(

View File

@ -169,9 +169,9 @@ obstacle to adoption, that text has been removed.
*/
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <cmath>
#include <cstdint>
#include <cstdio>
double preciseExp10(double x)
{

View File

@ -1,7 +1,7 @@
#include <base/sleep.h>
#include <time.h>
#include <errno.h>
#include <ctime>
#include <cerrno>
#if defined(OS_DARWIN)
#include <mach/mach.h>

View File

@ -1,14 +1,19 @@
#pragma once
#include <cstddef>
#ifdef HAS_RESERVED_IDENTIFIER
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
constexpr size_t KiB = 1024;
constexpr size_t MiB = 1024 * KiB;
constexpr size_t GiB = 1024 * MiB;
#ifdef HAS_RESERVED_IDENTIFIER
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; }
constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; }
constexpr size_t operator"" _GiB(unsigned long long val) { return val * GiB; }
#ifdef HAS_RESERVED_IDENTIFIER
# pragma clang diagnostic pop
#endif

View File

@ -5,7 +5,6 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
set (ARCH_AMD64 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
set (ARCH_AARCH64 1)
set (ARCH_ARM 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64le.*|PPC64LE.*)")
set (ARCH_PPC64LE 1)
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "riscv64")

View File

@ -1,4 +1,4 @@
if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache")
if (CMAKE_CXX_COMPILER_LAUNCHER MATCHES "ccache" OR CMAKE_C_COMPILER_LAUNCHER MATCHES "ccache")
set(COMPILER_MATCHES_CCACHE 1)
else()
set(COMPILER_MATCHES_CCACHE 0)
@ -30,7 +30,7 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
string(REGEX REPLACE "ccache version ([0-9\\.]+).*" "\\1" CCACHE_VERSION ${CCACHE_VERSION})
if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
message(STATUS "Using ccache: ${CCACHE_FOUND} (version ${CCACHE_VERSION})")
set(LAUNCHER ${CCACHE_FOUND})
# debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is
@ -49,8 +49,8 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
set (CMAKE_CXX_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_CXX_COMPILER_LAUNCHER})
set (CMAKE_C_COMPILER_LAUNCHER ${LAUNCHER} ${CMAKE_C_COMPILER_LAUNCHER})
else ()
message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
message(${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: No. Found ${CCACHE_FOUND} (version ${CCACHE_VERSION}) but disabled because of bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
endif ()
elseif (NOT CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use ccache")
message (${RECONFIGURE_MESSAGE_LEVEL} "Using ccache: No")
endif ()
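Since auto-detection can silently fall back to a plain compiler invocation, the launcher can also be passed explicitly at configure time. A sketch using the standard CMake launcher variables (the build directory name is illustrative):

```bash
# Route both C and C++ compilations through ccache explicitly.
cmake -S . -B build \
  -DCMAKE_C_COMPILER_LAUNCHER=ccache \
  -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
```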

View File

@ -3,6 +3,5 @@ include (CheckCCompilerFlag)
check_cxx_compiler_flag("-Wreserved-identifier" HAS_RESERVED_IDENTIFIER)
check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE)
check_cxx_compiler_flag("-Wshadow" HAS_SHADOW)
check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE)
check_cxx_compiler_flag("-Xclang -fuse-ctor-homing" HAS_USE_CTOR_HOMING)

View File

@ -31,7 +31,10 @@ if (ARCH_NATIVE)
elseif (ARCH_AARCH64)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8-a+crc")
else ()
elseif (ARCH_PPC64LE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -DNO_WARN_X86_INTRINSICS")
elseif (ARCH_AMD64)
set (TEST_FLAG "-mssse3")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
@ -60,10 +63,6 @@ else ()
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
endif ()
if (ARCH_PPC64LE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -DNO_WARN_X86_INTRINSICS")
endif ()
set (TEST_FLAG "-msse4.2")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
@ -93,7 +92,6 @@ else ()
endif ()
set (TEST_FLAG "-mpopcnt")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("
int main() {
@ -186,6 +184,8 @@ else ()
set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256")
endif ()
endif ()
else ()
# RISC-V + exotic platforms
endif ()
cmake_pop_check_state ()

View File

@ -11,7 +11,7 @@ if (Git_FOUND)
message(STATUS "HEAD's commit hash ${GIT_COMMIT_ID}")
execute_process(
COMMAND ${GIT_EXECUTABLE} status
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE)
else()
message(STATUS "The git program could not be found.")
endif()

View File

@ -15,6 +15,8 @@ elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin")
elseif (CMAKE_SYSTEM_NAME MATCHES "SunOS")
set (OS_SUNOS 1)
add_definitions(-D OS_SUNOS)
else ()
message (FATAL_ERROR "Platform ${CMAKE_SYSTEM_NAME} is not supported")
endif ()
if (CMAKE_CROSSCOMPILING)

View File

@ -6,67 +6,65 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
set (COMPILER_CLANG 1) # Safe to treat AppleClang as a regular Clang, in general.
elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set (COMPILER_CLANG 1)
else ()
message (FATAL_ERROR "Compiler ${CMAKE_CXX_COMPILER_ID} is not supported")
endif ()
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version)
# Print details to output
execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version OUTPUT_VARIABLE COMPILER_SELF_IDENTIFICATION OUTPUT_STRIP_TRAILING_WHITESPACE)
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")
# Require minimum compiler versions
set (CLANG_MINIMUM_VERSION 12)
set (XCODE_MINIMUM_VERSION 12.0)
set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
set (GCC_MINIMUM_VERSION 11)
if (COMPILER_GCC)
# Require minimum version of gcc
set (GCC_MINIMUM_VERSION 11)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.")
message (FATAL_ERROR "Compilation with GCC version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${GCC_MINIMUM_VERSION}.")
endif ()
message (WARNING "GCC compiler is not officially supported for ClickHouse. You should migrate to clang.")
message (WARNING "Compilation with GCC is unsupported. Please use Clang instead.")
elseif (COMPILER_CLANG)
# Require minimum version of clang/apple-clang
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})
message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew. See the instruction: https://clickhouse.com/docs/en/development/build-osx/")
message (FATAL_ERROR "Compilation with AppleClang is unsupported. Please use vanilla Clang, e.g. from Homebrew.")
endif ()
# AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
# AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0
set (XCODE_MINIMUM_VERSION 10.2)
set (APPLE_CLANG_MINIMUM_VERSION 10.0.1)
# For a mapping between XCode / AppleClang / vanilla Clang versions, see https://en.wikipedia.org/wiki/Xcode
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0)
# char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8,
# it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
message (FATAL_ERROR "Compilation with AppleClang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
endif ()
else ()
set (CLANG_MINIMUM_VERSION 12)
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.")
message (FATAL_ERROR "Compilation with Clang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${CLANG_MINIMUM_VERSION}.")
endif ()
endif ()
else ()
message (WARNING "You are using an unsupported compiler. Compilation has only been tested with Clang and GCC.")
endif ()
# Linker
string (REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
list (GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR)
# Linker
# Example values: `lld-10`, `gold`.
option (LINKER_NAME "Linker name or full path")
if (COMPILER_GCC AND NOT LINKER_NAME)
find_program (LLD_PATH NAMES "ld.lld")
find_program (GOLD_PATH NAMES "ld.gold")
elseif (NOT LINKER_NAME)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld")
find_program (GOLD_PATH NAMES "ld.gold" "gold")
endif ()
if (NOT LINKER_NAME)
if (COMPILER_GCC)
find_program (LLD_PATH NAMES "ld.lld")
find_program (GOLD_PATH NAMES "ld.gold")
elseif (COMPILER_CLANG)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "lld-${COMPILER_VERSION_MAJOR}" "ld.lld" "lld")
find_program (GOLD_PATH NAMES "ld.gold" "gold")
endif ()
endif()
if (OS_LINUX AND NOT LINKER_NAME)
# We prefer LLD linker over Gold or BFD on Linux.
# prefer lld linker over gold or ld on linux
if (LLD_PATH)
if (COMPILER_GCC)
# GCC driver requires one of supported linker names like "lld".
@ -87,9 +85,10 @@ if (OS_LINUX AND NOT LINKER_NAME)
endif ()
endif ()
endif ()
# TODO: allow different linker on != OS_LINUX
if (LINKER_NAME)
if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
if (COMPILER_CLANG)
find_program (LLD_PATH NAMES ${LINKER_NAME})
if (NOT LLD_PATH)
message (FATAL_ERROR "Using linker ${LINKER_NAME} but can't find its path.")
@ -101,9 +100,14 @@ if (LINKER_NAME)
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
endif ()
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
if (LINKER_NAME)
message(STATUS "Using linker: ${LINKER_NAME}")
else()
message(STATUS "Using linker: <default>")
endif()
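`LINKER_NAME` stays a cache option, so the detection above can be bypassed entirely. A hedged sketch, assuming LLD 14 is installed under this name:

```bash
# Force a specific linker instead of the auto-detected one.
cmake -DLINKER_NAME=ld.lld-14 -S . -B build
```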
# Archiver
if (COMPILER_GCC)
@ -116,6 +120,8 @@ if (LLVM_AR_PATH)
set (CMAKE_AR "${LLVM_AR_PATH}")
endif ()
message(STATUS "Using archiver: ${CMAKE_AR}")
# Ranlib
if (COMPILER_GCC)
@ -128,6 +134,8 @@ if (LLVM_RANLIB_PATH)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}")
endif ()
message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
# Install Name Tool
if (COMPILER_GCC)
@ -140,6 +148,8 @@ if (LLVM_INSTALL_NAME_TOOL_PATH)
set (CMAKE_INSTALL_NAME_TOOL "${LLVM_INSTALL_NAME_TOOL_PATH}")
endif ()
message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
# Objcopy
if (COMPILER_GCC)
@ -148,29 +158,13 @@ else ()
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
endif ()
if (NOT OBJCOPY_PATH AND OS_DARWIN)
find_program (BREW_PATH NAMES "brew")
if (BREW_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
if (LLVM_PREFIX)
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
if (NOT OBJCOPY_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
if (BINUTILS_PREFIX)
find_program (OBJCOPY_PATH NAMES "objcopy" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
endif ()
endif ()
endif ()
if (OBJCOPY_PATH)
message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
else ()
message (FATAL_ERROR "Cannot find objcopy.")
endif ()
# Strip (FIXME copypaste)
# Strip
if (COMPILER_GCC)
find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-13" "llvm-strip-12" "llvm-strip-11" "strip")
@ -178,22 +172,6 @@ else ()
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
endif ()
if (NOT STRIP_PATH AND OS_DARWIN)
find_program (BREW_PATH NAMES "brew")
if (BREW_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
if (LLVM_PREFIX)
find_program (STRIP_PATH NAMES "llvm-strip" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
if (NOT STRIP_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
if (BINUTILS_PREFIX)
find_program (STRIP_PATH NAMES "strip" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
endif ()
endif ()
endif ()
if (STRIP_PATH)
message (STATUS "Using strip: ${STRIP_PATH}")
else ()

View File

@ -7,7 +7,7 @@
# - sometimes warnings from 3rd party libraries may come from macro substitutions in our code
# and we have to wrap them with #pragma GCC/clang diagnostic ignored
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# Intended for exploration of new compiler warnings that may be found useful.
@ -25,6 +25,7 @@ if (COMPILER_CLANG)
no_warning(vla-extension)
no_warning(zero-length-array)
no_warning(c11-extensions)
no_warning(unused-command-line-argument)
if (WEVERYTHING)
add_warning(everything)

View File

@ -1,4 +1,4 @@
if(ARCH_AMD64 OR ARCH_ARM)
if(ARCH_AMD64 OR ARCH_AARCH64)
option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
elseif(ENABLE_BASE64)
message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64 and aarch64")

View File

@ -114,7 +114,7 @@ if (SANITIZE AND (SANITIZE STREQUAL "address" OR SANITIZE STREQUAL "thread"))
"${LIBRARY_DIR}/libs/context/src/continuation.cpp"
)
endif()
if (ARCH_ARM)
if (ARCH_AARCH64)
set (SRCS_CONTEXT ${SRCS_CONTEXT}
"${LIBRARY_DIR}/libs/context/src/asm/jump_arm64_aapcs_elf_gas.S"
"${LIBRARY_DIR}/libs/context/src/asm/make_arm64_aapcs_elf_gas.S"

View File

@ -1,5 +1,5 @@
if (SANITIZE OR NOT (
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_ARM OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
((OS_LINUX OR OS_FREEBSD) AND (ARCH_AMD64 OR ARCH_AARCH64 OR ARCH_PPC64LE OR ARCH_RISCV64)) OR
(OS_DARWIN AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG"))
))
if (ENABLE_JEMALLOC)
@ -141,7 +141,7 @@ if (ARCH_AMD64)
else()
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_x86_64")
endif()
elseif (ARCH_ARM)
elseif (ARCH_AARCH64)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_aarch64")
elseif (ARCH_PPC64LE)
set(JEMALLOC_INCLUDE_PREFIX "${JEMALLOC_INCLUDE_PREFIX}_ppc64le")

View File

@ -7,9 +7,9 @@ CHECK_FUNCTION_EXISTS(nanosleep HAVE_NANOSLEEP)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing")
IF(ENABLE_SSE STREQUAL ON AND NOT ARCH_PPC64LE AND NOT ARCH_AARCH64 AND NOT ARCH_ARM)
IF(ENABLE_SSE STREQUAL ON AND NOT ARCH_PPC64LE AND NOT ARCH_AARCH64)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
ENDIF(ENABLE_SSE STREQUAL ON AND NOT ARCH_PPC64LE AND NOT ARCH_AARCH64 AND NOT ARCH_ARM)
ENDIF(ENABLE_SSE STREQUAL ON AND NOT ARCH_PPC64LE AND NOT ARCH_AARCH64)
IF(NOT TEST_HDFS_PREFIX)
SET(TEST_HDFS_PREFIX "./" CACHE STRING "default directory prefix used for test." FORCE)

View File

@ -1,4 +1,4 @@
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE)
if(NOT ARCH_AARCH64 AND NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_PPC64LE)
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
elseif(ENABLE_HDFS)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")

contrib/poco vendored

@ -1 +1 @@
Subproject commit 5d11f0aa6fd2faad0a7b34aa355cd50c4deb27e6
Subproject commit de35b9fd72b57127abdc3a5beaf0e320d767e356

View File

@ -13,7 +13,7 @@ then
elif [ "${ARCH}" = "aarch64" ]
then
DIR="aarch64"
elif [ "${ARCH}" = "powerpc64le" ]
elif [ "${ARCH}" = "powerpc64le" ] || [ "${ARCH}" = "ppc64le" ]
then
DIR="powerpc64le"
fi
@ -25,7 +25,7 @@ then
elif [ "${ARCH}" = "aarch64" ]
then
DIR="freebsd-aarch64"
elif [ "${ARCH}" = "powerpc64le" ]
elif [ "${ARCH}" = "powerpc64le" ] || [ "${ARCH}" = "ppc64le" ]
then
DIR="freebsd-powerpc64le"
fi
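The added alternative accounts for the machine name being reported inconsistently across environments. A sketch of the normalization this script now performs:

```bash
#!/usr/bin/env bash
# `uname -m` prints "ppc64le" on most Linux systems, but "powerpc64le"
# appears in some toolchains; both now map to the same directory.
case "$(uname -m)" in
    powerpc64le|ppc64le) DIR="powerpc64le" ;;
esac
echo "${DIR:-unknown}"
```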

View File

@ -54,6 +54,7 @@ To build using Homebrew's vanilla Clang compiler (the only **recommended** way):
``` bash
cd ClickHouse
mkdir build
export PATH=$(brew --prefix llvm)/bin:$PATH
export CC=$(brew --prefix llvm)/bin/clang
export CXX=$(brew --prefix llvm)/bin/clang++
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build
@ -79,6 +80,8 @@ To build using Homebrew's vanilla GCC compiler (this option is only for developm
``` bash
cd ClickHouse
mkdir build
export PATH=$(brew --prefix binutils)/bin:$PATH
export PATH=$(brew --prefix gcc)/bin:$PATH
export CC=$(brew --prefix gcc)/bin/gcc-11
export CXX=$(brew --prefix gcc)/bin/g++-11
cmake -G Ninja -DCMAKE_BUILD_TYPE=RelWithDebInfo -S . -B build

View File

@ -19,7 +19,7 @@ The following tutorial is based on the Ubuntu Linux system. With appropriate cha
### Install Git, CMake, Python and Ninja {#install-git-cmake-python-and-ninja}
``` bash
$ sudo apt-get install git cmake python ninja-build
sudo apt-get install git cmake python ninja-build
```
On older systems, use cmake3 instead of cmake.
@ -37,8 +37,8 @@ For other Linux distribution - check the availability of the [prebuild packages]
#### Use the latest clang for Builds
``` bash
$ export CC=clang-14
$ export CXX=clang++-14
export CC=clang-14
export CXX=clang++-14
```
In this example, we use version 14, which is the latest as of February 2022.
GCC can also be used, though it is discouraged.
### Checkout ClickHouse Sources {#checkout-clickhouse-sources}
``` bash
$ git clone --recursive git@github.com:ClickHouse/ClickHouse.git
git clone --recursive git@github.com:ClickHouse/ClickHouse.git
```
or
``` bash
$ git clone --recursive https://github.com/ClickHouse/ClickHouse.git
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
```
### Build ClickHouse {#build-clickhouse}
``` bash
$ cd ClickHouse
$ mkdir build
$ cd build
$ cmake ..
$ ninja
cd ClickHouse
mkdir build
cd build
cmake ..
ninja
```
To create an executable, run `ninja clickhouse`.
@ -114,13 +114,13 @@ make -j $(nproc)
Here is an example of how to build `clang` and all the llvm infrastructure from sources:
```
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
git clone git@github.com:llvm/llvm-project.git
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_BUILD_TYPE:STRING=Release -DLLVM_ENABLE_PROJECTS=all ../llvm-project/llvm/
make -j16
sudo make install
hash clang
clang --version
```
You can install an older clang, such as clang-11, from packages and then use it to build the new clang from sources.
@ -140,21 +140,21 @@ hash cmake
### Install Git {#install-git}
``` bash
$ sudo apt-get update
$ sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
sudo apt-get update
sudo apt-get install git python debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring
```
### Checkout ClickHouse Sources {#checkout-clickhouse-sources-1}
``` bash
$ git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
$ cd ClickHouse
git clone --recursive --branch master https://github.com/ClickHouse/ClickHouse.git
cd ClickHouse
```
### Run Release Script {#run-release-script}
``` bash
$ ./release
./release
```
## You Don't Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}

View File

@ -300,6 +300,12 @@ Note that ClickHouse uses forks of these libraries, see https://github.com/Click
<td>Take care to add prlimit in command line before ccache, or else ccache thinks that prlimit is compiler, and clang++ is its input file, and refuses to work with multiple inputs, e.g in ccache log: [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp Another way would be to use --ccache-skip option before clang++-11 to make ccache ignore it.</td>
</tr>
<tr>
<td><a name="enable-colored-build"></a><a href="https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L160" rel="external nofollow noreferrer" target="_blank"><code class="syntax">ENABLE_COLORED_BUILD</code></a></td>
<td><code class="syntax">ON</code></td>
<td>Enable colored diagnostics in build log.</td>
<td></td>
</tr>
<tr>
<td><a name="enable-examples"></a><a href="https://github.com/clickhouse/clickhouse/blob/master/CMakeLists.txt#L201" rel="external nofollow noreferrer" target="_blank"><code class="syntax">ENABLE_EXAMPLES</code></a></td>
<td><code class="syntax">OFF</code></td>
<td>Build all example programs in 'examples' subdirectories</td>

View File

@ -189,6 +189,8 @@ Example:
- `_timestamp` — Timestamp of the message.
- `_timestamp_ms` — Timestamp in milliseconds of the message.
- `_partition` — Partition of Kafka topic.
- `_headers.name` — Array of message's headers keys.
- `_headers.value` — Array of message's headers values.
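A hedged sketch of reading the new header columns; `kafka_queue` is a placeholder for a table using the Kafka engine:

```bash
# Inspect Kafka message headers alongside other virtual columns.
clickhouse-client --query "
    SELECT _topic, _key, _headers.name, _headers.value
    FROM kafka_queue
    LIMIT 5"
```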
**See Also**

View File

@ -29,7 +29,7 @@ To analyze the `trace_log` system table:
- Use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` [introspection functions](../../sql-reference/functions/introspection.md) to get function names and their positions in ClickHouse code. To get a profile for some query, you need to aggregate data from the `trace_log` table. You can aggregate data by individual functions or by whole stack traces.
If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui/#clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
If you need to visualize `trace_log` info, try [flamegraph](../../interfaces/third-party/gui.md#clickhouse-flamegraph-clickhouse-flamegraph) and [speedscope](https://github.com/laplab/clickhouse-speedscope).
## Example {#example}

View File

@ -12,3 +12,34 @@ Values can be added to the array in any (indeterminate) order.
The second version (with the `max_size` parameter) limits the size of the resulting array to `max_size` elements. For example, `groupArray(1)(x)` is equivalent to `[any(x)]`.
In some cases, you can still rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
**Example**
``` text
SELECT * FROM default.ck;
┌─id─┬─name─────┐
│ 1 │ zhangsan │
│ 1 │ ᴺᵁᴸᴸ │
│ 1 │ lisi │
│ 2 │ wangwu │
└────┴──────────┘
```
Query:
``` sql
SELECT id, groupArray(10)(name) FROM default.ck GROUP BY id;
```
Result:
``` text
┌─id─┬─groupArray(10)(name)─┐
│ 1 │ ['zhangsan','lisi'] │
│ 2 │ ['wangwu'] │
└────┴──────────────────────┘
```
As the results above show, the `groupArray` function removes `NULL` values.

View File

@ -0,0 +1,76 @@
---
sidebar_position: 54
sidebar_label: JSON
---
# JSON {#json-data-type}
Stores JavaScript Object Notation (JSON) documents in a single column.
`JSON` is an alias for `Object('json')`.
:::warning
The JSON data type is an experimental feature. To use it, set `allow_experimental_object_type = 1`.
:::
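One way to supply the setting per session, sketched with `clickhouse-client` (passing the setting as a command-line flag; the table name is illustrative):

```bash
# Enable the experimental Object('json') type for this invocation.
clickhouse-client --allow_experimental_object_type=1 \
    --query "CREATE TABLE json_demo (o JSON) ENGINE = Memory"
```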
## Example {#usage-example}
**Example 1**
Creating a table with a `JSON` column and inserting data into it:
```sql
CREATE TABLE json
(
o JSON
)
ENGINE = Memory
```
```sql
INSERT INTO json VALUES ('{"a": 1, "b": { "c": 2, "d": [1, 2, 3] }}')
```
```sql
SELECT o.a, o.b.c, o.b.d[3] FROM json
```
```text
┌─o.a─┬─o.b.c─┬─arrayElement(o.b.d, 3)─┐
│ 1 │ 2 │ 3 │
└─────┴───────┴────────────────────────┘
```
**Example 2**
To create an ordered `MergeTree`-family table, the sorting key has to be extracted into its own column. For example, to insert a file of compressed HTTP access logs in JSON format:
```sql
CREATE TABLE logs
(
timestamp DateTime,
message JSON
)
ENGINE = MergeTree
ORDER BY timestamp
```
```sql
INSERT INTO logs
SELECT parseDateTimeBestEffort(JSONExtractString(json, 'timestamp')), json
FROM file('access.json.gz', JSONAsString)
```
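Once loaded, nested fields of the `message` column can be queried with the same dot notation as in Example 1. A hypothetical sketch, assuming the access-log documents contain a `status` field (the field name is illustrative, not taken from the example data):
```sql
SELECT message.status, count()
FROM logs
GROUP BY message.status
```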
## Displaying JSON columns
When displaying a `JSON` column, ClickHouse only shows the field values by default (because internally it is represented as a tuple). You can display the field names as well by setting `output_format_json_named_tuples_as_objects = 1`:
```sql
SET output_format_json_named_tuples_as_objects = 1
SELECT * FROM json FORMAT JSONEachRow
```
```text
{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}}
```

View File

@ -12,7 +12,7 @@ The following operations are available:
- `ALTER TABLE [db].name DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk.
- `ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name` - The query rebuilds the secondary index `name` in the partition `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations). To rebuild index over the whole data in the table you need to remove `IN PARTITION` from query.
- `ALTER TABLE [db.]table MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](../../../../sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
The first two commands are lightweight in the sense that they only change metadata or remove files.
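For illustration, a hedged sketch of the two `MATERIALIZE INDEX` forms described above (`hits`, `idx_url`, and the partition value are placeholder names):
``` sql
-- Rebuild the index only in one partition
ALTER TABLE hits MATERIALIZE INDEX idx_url IN PARTITION '2022-05-01';
-- Rebuild the index over the whole table data
ALTER TABLE hits MATERIALIZE INDEX idx_url;
```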
@ -20,4 +20,4 @@ Also, they are replicated, syncing indices metadata via ZooKeeper.
:::note
Index manipulation is supported only for tables with [`*MergeTree`](../../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../../engines/table-engines/mergetree-family/replication.md) variants).
:::
:::

View File

@ -104,7 +104,7 @@ There are many nuances to processing `NULL`. For example, if at least one of the
In queries, you can check `NULL` using the [IS NULL](../sql-reference/operators/index.md#operator-is-null) and [IS NOT NULL](../sql-reference/operators/index.md) operators and the related functions `isNull` and `isNotNull`.
### Heredoc {#heredeoc}
### Heredoc {#heredoc}
A [heredoc](https://en.wikipedia.org/wiki/Here_document) is a way to define a string (often multiline), while maintaining the original formatting. A heredoc is defined as a custom string literal, placed between two `$` symbols, for example `$heredoc$`. A value between two heredocs is processed "as-is".
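A short sketch of the syntax described above (the `$doc$` tag is an arbitrary placeholder):
``` sql
SELECT $doc$String with 'quotes' and
a line break, kept exactly as written$doc$ AS s;
```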

View File

@ -102,7 +102,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
In queries, you can check for `NULL` using the [IS NULL](operators/index.md#operator-is-null) and [IS NOT NULL](operators/index.md) operators and the related functions `isNull` and `isNotNull`.
### Heredoc {#heredeoc}
### Heredoc {#heredoc}
The [heredoc](https://ru.wikipedia.org/wiki/Heredoc-синтаксис) syntax is a way to define strings while preserving their original formatting (often with line breaks). A heredoc is defined as an arbitrary string literal between two `$` symbols, for example `$heredoc$`. A value between two heredocs is processed "as is".

View File

@ -4,26 +4,437 @@
The result type is an integer whose number of bits equals the maximum number of bits of its arguments. If at least one of the arguments is signed, the result is a signed number. If an argument is a floating-point number, it is cast to Int64.
## bitAnd(a,b) {#bitanda-b}
## bitAnd(a, b) {#bitanda-b}
## bitOr(a,b) {#bitora-b}
## bitOr(a, b) {#bitora-b}
## bitXor(a,b) {#bitxora-b}
## bitXor(a, b) {#bitxora-b}
## bitNot(a) {#bitnota}
## bitShiftLeft(a,b) {#bitshiftlefta-b}
## bitShiftLeft(a, b) {#bitshiftlefta-b}
## bitShiftRight(a,b) {#bitshiftrighta-b}
Shifts the binary representation of a value to the left by the specified number of bit positions.
## bitRotateLeft(a,b) {#bitrotatelefta-b}
A `FixedString` or `String` is treated as a single multi-byte value.
## bitRotateRight(a,b) {#bitrotaterighta-b}
Bits of a `FixedString` value are lost as they are shifted out. In contrast, a `String` value is extended with additional bytes, so no bits are lost.
## bitTest(a,b) {#bittesta-b}
**Syntax**
## bitTestAll(a,b) {#bittestalla-b}
``` sql
bitShiftLeft(a, b)
```
**Arguments**
- `a` — The value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of positions to shift by. [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit types or smaller are allowed.
**Returned value**
- The shifted value.
The type of the returned value is the same as the type of the input value.
**Example**
In the following queries, the [bin](encoding-functions.md#bin) and [hex](encoding-functions.md#hex) functions are used to show the bits of the shifted values.
``` sql
SELECT 99 AS a, bin(a), bitShiftLeft(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftLeft(a, 4) AS a_shifted, hex(a_shifted);
```
Result:
``` text
┌──a─┬─bin(99)──┬─a_shifted─┬─bin(bitShiftLeft(99, 2))─┐
│ 99 │ 01100011 │ 140 │ 10001100 │
└────┴──────────┴───────────┴──────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftLeft('abc', 4))─┐
│ abc │ 616263 │ &0 │ 06162630 │
└─────┴────────────┴───────────┴─────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftLeft(toFixedString('abc', 3), 4))─┐
│ abc │ 616263 │ &0 │ 162630 │
└─────┴──────────────────────────────┴───────────┴───────────────────────────────────────────────┘
```
## bitShiftRight(a, b) {#bitshiftrighta-b}
Shifts the binary representation of a value to the right by the specified number of bit positions.
A `FixedString` or `String` is treated as a single multi-byte value. Note that the length of a `String` value decreases as bits are shifted out.
**Syntax**
``` sql
bitShiftRight(a, b)
```
**Arguments**
- `a` — The value to shift. [Integer types](../../sql-reference/data-types/int-uint.md), [String](../../sql-reference/data-types/string.md), or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `b` — The number of positions to shift by. [Unsigned integer types](../../sql-reference/data-types/int-uint.md); 64-bit types or smaller are allowed.
**Returned value**
- The shifted value.
The type of the returned value is the same as the type of the input value.
**Example**
Query:
``` sql
SELECT 101 AS a, bin(a), bitShiftRight(a, 2) AS a_shifted, bin(a_shifted);
SELECT 'abc' AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
SELECT toFixedString('abc', 3) AS a, hex(a), bitShiftRight(a, 12) AS a_shifted, hex(a_shifted);
```
Result:
``` text
┌───a─┬─bin(101)─┬─a_shifted─┬─bin(bitShiftRight(101, 2))─┐
│ 101 │ 01100101 │ 25 │ 00011001 │
└─────┴──────────┴───────────┴────────────────────────────┘
┌─a───┬─hex('abc')─┬─a_shifted─┬─hex(bitShiftRight('abc', 12))─┐
│ abc │ 616263 │ │ 0616 │
└─────┴────────────┴───────────┴───────────────────────────────┘
┌─a───┬─hex(toFixedString('abc', 3))─┬─a_shifted─┬─hex(bitShiftRight(toFixedString('abc', 3), 12))─┐
│ abc │ 616263 │ │ 000616 │
└─────┴──────────────────────────────┴───────────┴─────────────────────────────────────────────────┘
```
## bitRotateLeft(a, b) {#bitrotatelefta-b}
## bitRotateRight(a, b) {#bitrotaterighta-b}
## bitSlice(s, offset, length)
Returns a substring of `length` bits starting at the bit indexed by `offset`; bit indexing starts at 1.
**Syntax**
``` sql
bitSlice(s, offset[, length])
```
**Arguments**
- `s` — A [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
- `offset` — The starting bit index. A positive value counts from the left, a negative value from the right. Bit numbering starts at 1.
- `length` — The length of the substring in bits. If you specify a negative value, the function returns the open substring \[offset, array_length - length\]. If the value is omitted, the function returns the substring \[offset, the_end_string\]. If `length` exceeds `s`, it is truncated. If `length` is not a multiple of 8, the result is padded with zeros on the right.
**Returned value**
- The substring, of type [String](../../sql-reference/data-types/string.md).
**Example**
Query:
``` sql
select bin('Hello'), bin(bitSlice('Hello', 1, 8))
select bin('Hello'), bin(bitSlice('Hello', 1, 2))
select bin('Hello'), bin(bitSlice('Hello', 1, 9))
select bin('Hello'), bin(bitSlice('Hello', -4, 8))
```
Result:
``` text
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 8))─┐
│ 0100100001100101011011000110110001101111 │ 01001000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 2))─┐
│ 0100100001100101011011000110110001101111 │ 01000000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', 1, 9))─┐
│ 0100100001100101011011000110110001101111 │ 0100100000000000 │
└──────────────────────────────────────────┴──────────────────────────────┘
┌─bin('Hello')─────────────────────────────┬─bin(bitSlice('Hello', -4, 8))─┐
│ 0100100001100101011011000110110001101111 │ 11110000 │
└──────────────────────────────────────────┴───────────────────────────────┘
```
## bitTest {#bittest}
Takes any integer and converts it to [binary form](https://en.wikipedia.org/wiki/Binary_number), then returns the value of the bit at the specified position. Bits are counted from right to left, starting from 0.
**Syntax**
``` sql
SELECT bitTest(number, index)
```
**Arguments**
- `number` – An integer number.
- `index` – The position of the bit to get.
**Returned value**
Returns the value of the bit at the specified position.
Type: `UInt8`.
**Example**
For example, the decimal number 43 is 101011 in binary.
Query:
``` sql
SELECT bitTest(43, 1);
```
Result:
``` text
┌─bitTest(43, 1)─┐
│ 1 │
└────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTest(43, 2);
```
Result:
``` text
┌─bitTest(43, 2)─┐
│ 0 │
└────────────────┘
```
## bitTestAll {#bittestall}
Returns the result of the [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND) of the bits at the given positions. Bits are counted from right to left, starting from 0.
The result of the AND operation:
0 AND 0 = 0
0 AND 1 = 0
1 AND 0 = 0
1 AND 1 = 1
**Syntax**
``` sql
SELECT bitTestAll(number, index1, index2, index3, index4, ...)
```
**Arguments**
- `number` – An integer number.
- `index1`, `index2`, `index3`, `index4` – Positions of bits. For example, the set of positions (`index1`, `index2`, `index3`, `index4`) is true if and only if all of its positions are true (`index1` ⋀ `index2` ⋀ `index3` ⋀ `index4`).
**Returned value**
Returns the result of the logical conjunction.
Type: `UInt8`.
**Example**
For example, the decimal number 43 is 101011 in binary.
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5);
```
Result:
``` text
┌─bitTestAll(43, 0, 1, 3, 5)─┐
│ 1 │
└────────────────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTestAll(43, 0, 1, 3, 5, 2);
```
Result:
``` text
┌─bitTestAll(43, 0, 1, 3, 5, 2)─┐
│ 0 │
└───────────────────────────────┘
```
## bitTestAny {#bittestany}
Returns the result of the [logical disjunction](https://en.wikipedia.org/wiki/Logical_disjunction) (OR) of the bits at the given positions. Bits are counted from right to left, starting from 0.
The result of the OR operation:
0 OR 0 = 0
0 OR 1 = 1
1 OR 0 = 1
1 OR 1 = 1
**Syntax**
``` sql
SELECT bitTestAny(number, index1, index2, index3, index4, ...)
```
**Arguments**
- `number` – An integer number.
- `index1`, `index2`, `index3`, `index4` – Positions of bits.
**Returned value**
Returns the result of the logical disjunction.
Type: `UInt8`.
**Example**
For example, the decimal number 43 is 101011 in binary.
Query:
``` sql
SELECT bitTestAny(43, 0, 2);
```
Result:
``` text
┌─bitTestAny(43, 0, 2)─┐
│ 1 │
└──────────────────────┘
```
Another example:
Query:
``` sql
SELECT bitTestAny(43, 4, 2);
```
Result:
``` text
┌─bitTestAny(43, 4, 2)─┐
│ 0 │
└──────────────────────┘
```
## bitCount {#bitcount}
Counts the number of bits set to 1 in the binary representation of a number.
**Syntax**
``` sql
bitCount(x)
```
**Arguments**
- `x` — An [Integer](../../sql-reference/data-types/int-uint.md) or [floating-point](../../sql-reference/data-types/float.md) number. The function uses the in-memory representation of the value, which is what allows it to support floating-point numbers.
**Returned value**
- The number of bits set to 1 in the input number.
The function does not convert the input value to a larger type ([sign extension](https://en.wikipedia.org/wiki/Sign_extension)). So, for example, `bitCount(toUInt8(-1)) = 8`.
Type: `UInt8`.
**Example**
Take the decimal number 333 as an example; its binary representation is 0000000101001101.
Query:
``` sql
SELECT bitCount(333);
```
Result:
``` text
┌─bitCount(333)─┐
│ 5 │
└───────────────┘
```
## bitHammingDistance {#bithammingdistance}
Returns the [Hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between the bit representations of two integer values. Can be used with [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) functions to detect semi-duplicate strings. The smaller the distance, the more likely the strings are the same.
**Syntax**
``` sql
bitHammingDistance(int1, int2)
```
**Arguments**
- `int1` — The first integer value. [Int64](../../sql-reference/data-types/int-uint.md).
- `int2` — The second integer value. [Int64](../../sql-reference/data-types/int-uint.md).
**Returned value**
- The Hamming distance.
Type: [UInt8](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT bitHammingDistance(111, 121);
```
Result:
``` text
┌─bitHammingDistance(111, 121)─┐
│ 3 │
└──────────────────────────────┘
```
With the [SimHash](../../sql-reference/functions/hash-functions.md#ngramsimhash) function:
``` sql
SELECT bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'));
```
Result:
``` text
┌─bitHammingDistance(ngramSimHash('cat ate rat'), ngramSimHash('rat ate cat'))─┐
│ 5 │
└──────────────────────────────────────────────────────────────────────────────┘
```
## bitTestAny(a,b) {#bittestanya-b}
[Original article](https://clickhouse.com/docs/en/query_language/functions/bit_functions/) <!--hide-->

View File

@ -1,8 +1,8 @@
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <csignal>
#include <ctime>
#include <iostream>
#include <fstream>
#include <iomanip>

View File

@ -1,4 +1,4 @@
#include <stdlib.h>
#include <cstdlib>
#include <fcntl.h>
#include <map>
#include <iostream>

View File

@ -1,5 +1,5 @@
#include <signal.h>
#include <setjmp.h>
#include <csignal>
#include <csetjmp>
#include <unistd.h>
#ifdef __linux__
@ -335,7 +335,7 @@ struct Checker
;
/// NOTE: We will migrate to full static linking or our own dynamic loader to make this code obsolete.
void checkHarmfulEnvironmentVariables()
void checkHarmfulEnvironmentVariables(char ** argv)
{
std::initializer_list<const char *> harmful_env_variables = {
/// The list is a selection from "man ld-linux".
@ -351,14 +351,39 @@ void checkHarmfulEnvironmentVariables()
"DYLD_INSERT_LIBRARIES",
};
bool require_reexec = false;
for (const auto * var : harmful_env_variables)
{
if (const char * value = getenv(var); value && value[0])
{
std::cerr << fmt::format("Environment variable {} is set to {}. It can compromise security.\n", var, value);
_exit(1);
/// NOTE: setenv() is used over unsetenv() since unsetenv() marked as harmful
if (setenv(var, "", true))
{
fmt::print(stderr, "Cannot override {} environment variable", var);
_exit(1);
}
require_reexec = true;
}
}
if (require_reexec)
{
/// Use execvp() over execv() to search in PATH.
///
/// This should be safe, since:
/// - if argv[0] is a relative path - it is OK
/// - if argv[0] is only a basename, it will be searched for in PATH, as a shell would do.
///
/// Also note that we search in PATH because there is no easy and
/// portable way to get the absolute path of argv[0]:
/// - on Linux there is /proc/self/exe and AT_EXECFN,
/// - but on other OSes there is no such thing (especially on OSX).
///
/// And since static linking will be done someday anyway,
/// let's not pollute the code base with special cases.
int error = execvp(argv[0], argv);
_exit(error);
}
}
}
@ -381,7 +406,7 @@ int main(int argc_, char ** argv_)
inside_main = true;
SCOPE_EXIT({ inside_main = false; });
checkHarmfulEnvironmentVariables();
checkHarmfulEnvironmentVariables(argv_);
/// Reset new handler to default (that throws std::bad_alloc)
/// It is needed because LLVM library clobbers it.

View File

@ -5,7 +5,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <errno.h>
#include <cerrno>
#include <pwd.h>
#include <unistd.h>
#include <Poco/Version.h>

View File

@ -129,8 +129,8 @@
#query_div
{
/* Make enough space for medium/large queries, while still allowing the query textarea to grow. */
min-height: 20%;
/* Make enough space for even huge queries. */
height: 20%;
}
#query

View File

@ -17,7 +17,7 @@
#include <Common/logger_useful.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/range/algorithm/set_algorithm.hpp>
#include <assert.h>
#include <cassert>
namespace DB

View File

@ -14,6 +14,7 @@
#include <AggregateFunctions/IAggregateFunction.h>
#include <Common/config.h>
#include <Common/TargetSpecific.h>
#if USE_EMBEDDED_COMPILER
# include <llvm/IR/IRBuilder.h>
@ -58,8 +59,11 @@ struct AggregateFunctionSumData
}
/// Vectorized version
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(addManyImpl,
MULTITARGET_FH(
template <typename Value>
void NO_SANITIZE_UNDEFINED NO_INLINE addMany(const Value * __restrict ptr, size_t start, size_t end)
void NO_SANITIZE_UNDEFINED NO_INLINE
), /*addManyImpl*/ MULTITARGET_FB((const Value * __restrict ptr, size_t start, size_t end) /// NOLINT
{
ptr += start;
size_t count = end - start;
@ -95,11 +99,34 @@ struct AggregateFunctionSumData
++ptr;
}
Impl::add(sum, local_sum);
})
)
/// Vectorized version
template <typename Value>
void NO_INLINE addMany(const Value * __restrict ptr, size_t start, size_t end)
{
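/// Dispatch at runtime to the most specific vectorized implementation the current CPU supports.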
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
addManyImplAVX2(ptr, start, end);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
addManyImplSSE42(ptr, start, end);
return;
}
#endif
addManyImpl(ptr, start, end);
}
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(addManyConditionalInternalImpl,
MULTITARGET_FH(
template <typename Value, bool add_if_zero>
void NO_SANITIZE_UNDEFINED NO_INLINE
addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
), /*addManyConditionalInternalImpl*/ MULTITARGET_FB((const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end) /// NOLINT
{
ptr += start;
size_t count = end - start;
@ -163,6 +190,27 @@ struct AggregateFunctionSumData
++condition_map;
}
Impl::add(sum, local_sum);
})
)
/// Vectorized version
template <typename Value, bool add_if_zero>
void NO_INLINE addManyConditionalInternal(const Value * __restrict ptr, const UInt8 * __restrict condition_map, size_t start, size_t end)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
addManyConditionalInternalImplAVX2<Value, add_if_zero>(ptr, condition_map, start, end);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
addManyConditionalInternalImplSSE42<Value, add_if_zero>(ptr, condition_map, start, end);
return;
}
#endif
addManyConditionalInternalImpl<Value, add_if_zero>(ptr, condition_map, start, end);
}
template <typename Value>

View File

@ -540,7 +540,7 @@ public:
Arena * arena)
const override
{
size_t current_offset = 0;
size_t current_offset = offsets[static_cast<ssize_t>(row_begin) - 1];
for (size_t i = row_begin; i < row_end; ++i)
{
size_t next_offset = offsets[i];

View File

@ -103,8 +103,9 @@ class QuantileTDigest
*/
static Value interpolate(Value x, Value x1, Value y1, Value x2, Value y2)
{
/// Symmetric interpolation for better results with infinities.
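/// When y1 and y2 are the same infinity, y2 - y1 evaluates to NaN, while the symmetric
/// form below propagates that infinity for any k strictly between 0 and 1.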
double k = (x - x1) / (x2 - x1);
return y1 + k * (y2 - y1);
return (1 - k) * y1 + k * y2;
}
struct RadixSortTraits
@ -137,6 +138,11 @@ class QuantileTDigest
compress();
}
inline bool canBeMerged(const BetterFloat & l_mean, const Value & r_mean)
{
return l_mean == r_mean || (!std::isinf(l_mean) && !std::isinf(r_mean));
}
void compressBrute()
{
if (centroids.size() <= params.max_centroids)
@ -149,13 +155,17 @@ class QuantileTDigest
BetterFloat l_mean = l->mean; // We have high-precision temporaries for numeric stability
BetterFloat l_count = l->count;
size_t batch_pos = 0;
for (;r != centroids.end(); ++r)
for (; r != centroids.end(); ++r)
{
if (batch_pos < batch_size - 1)
{
/// The left column "eats" the right. Middle of the batch
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
if (r->mean != l_mean) /// Handling infinities of the same sign well.
{
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
}
l->mean = l_mean;
l->count = l_count;
batch_pos += 1;
@ -163,8 +173,11 @@ class QuantileTDigest
else
{
// End of the batch, start the next one
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
if (!std::isnan(l->mean)) /// Skip writing batch result if we compressed something to nan.
{
sum += l->count; // Not l_count, otherwise actual sum of elements will be different
++l;
}
/// We skip all the values "eaten" earlier.
*l = *r;
@ -173,8 +186,17 @@ class QuantileTDigest
batch_pos = 0;
}
}
count = sum + l_count; // Update count, it might be different due to += inaccuracy
centroids.resize(l - centroids.begin() + 1);
if (!std::isnan(l->mean))
{
count = sum + l_count; // Update count, it might be different due to += inaccuracy
centroids.resize(l - centroids.begin() + 1);
}
else /// Skip writing last batch if (super unlikely) it's nan.
{
count = sum;
centroids.resize(l - centroids.begin());
}
// Here centroids.size() <= params.max_centroids
}
@ -200,11 +222,8 @@ public:
BetterFloat l_count = l->count;
while (r != centroids.end())
{
/// N.B. Piece of logic which compresses the same singleton centroids into one centroid is removed
/// because: 1) singleton centroids are being processed in unusual way in recent version of algorithm
/// and such compression would break this logic;
/// 2) we shall not compress centroids further than `max_centroids` parameter requires because
/// this will lead to uneven compression.
/// N.B. We cannot merge all equal values into a single centroid because this would lead to
/// unbalanced compression and wrong results.
/// For more information see: https://arxiv.org/abs/1902.04023
/// The ratio of the part of the histogram up to l (including half of l) to the entire histogram. That is, which quantile level corresponds to position l.
@ -225,12 +244,15 @@ public:
* and at the edges decreases and is approximately equal to the distance to the edge * 4.
*/
if (l_count + r->count <= k)
if (l_count + r->count <= k && canBeMerged(l_mean, r->mean))
{
// it is possible to merge left and right
/// The left column "eats" the right.
l_count += r->count;
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
if (r->mean != l_mean) /// Handling infinities of the same sign well.
{
l_mean += r->count * (r->mean - l_mean) / l_count; // Symmetric algo (M1*C1 + M2*C2)/(C1+C2) is numerically better, but slower
}
l->mean = l_mean;
l->count = l_count;
}
@ -254,6 +276,7 @@ public:
centroids.resize(l - centroids.begin() + 1);
unmerged = 0;
}
// Ensures centroids.size() < max_centroids, independent of unprovable floating point blackbox above
compressBrute();
}
@ -298,10 +321,17 @@ public:
for (const auto & c : centroids)
{
if (c.count <= 0 || std::isnan(c.count) || std::isnan(c.mean)) // invalid count breaks compress(), invalid mean breaks sort()
if (c.count <= 0 || std::isnan(c.count)) // invalid count breaks compress()
throw Exception("Invalid centroid " + std::to_string(c.count) + ":" + std::to_string(c.mean), ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED);
count += c.count;
if (!std::isnan(c.mean))
{
count += c.count;
}
}
auto it = std::remove_if(centroids.begin(), centroids.end(), [](Centroid & c) { return std::isnan(c.mean); });
centroids.erase(it, centroids.end());
compress(); // Allows reading/writing TDigests with different epsilon/max_centroids params
}
@ -312,7 +342,7 @@ public:
ResultType getImpl(Float64 level)
{
if (centroids.empty())
return std::is_floating_point_v<ResultType> ? NAN : 0;
return std::is_floating_point_v<ResultType> ? std::numeric_limits<ResultType>::quiet_NaN() : 0;
compress();
@ -395,7 +425,6 @@ public:
while (current_x >= x)
{
if (x <= left)
result[levels_permutation[result_num]] = prev_mean;
else if (x >= right)

View File

@ -25,6 +25,34 @@ namespace
{
using SizeAndChecksum = IBackupCoordination::SizeAndChecksum;
using FileInfo = IBackupCoordination::FileInfo;
using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum;
String serializePartNamesAndChecksums(const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
WriteBufferFromOwnString out;
writeBinary(part_names_and_checksums.size(), out);
for (const auto & part_name_and_checksum : part_names_and_checksums)
{
writeBinary(part_name_and_checksum.part_name, out);
writeBinary(part_name_and_checksum.checksum, out);
}
return out.str();
}
std::vector<PartNameAndChecksum> deserializePartNamesAndChecksums(const String & str)
{
ReadBufferFromString in{str};
std::vector<PartNameAndChecksum> part_names_and_checksums;
size_t num;
readBinary(num, in);
part_names_and_checksums.resize(num);
for (size_t i = 0; i != num; ++i)
{
readBinary(part_names_and_checksums[i].part_name, in);
readBinary(part_names_and_checksums[i].checksum, in);
}
return part_names_and_checksums;
}
String serializeFileInfo(const FileInfo & info)
{
@ -92,7 +120,9 @@ namespace
}
BackupCoordinationDistributed::BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_)
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, preparing_barrier(zookeeper_path_ + "/preparing", get_zookeeper_, "BackupCoordination", "preparing")
{
createRootNodes();
}
@ -104,6 +134,8 @@ void BackupCoordinationDistributed::createRootNodes()
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_parts", "");
zookeeper->createIfNotExists(zookeeper_path + "/file_names", "");
zookeeper->createIfNotExists(zookeeper_path + "/file_infos", "");
zookeeper->createIfNotExists(zookeeper_path + "/archive_suffixes", "");
@ -115,6 +147,102 @@ void BackupCoordinationDistributed::removeAllNodes()
zookeeper->removeRecursive(zookeeper_path);
}
void BackupCoordinationDistributed::addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_data_path);
zookeeper->createIfNotExists(path, "");
}
void BackupCoordinationDistributed::addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_parts/" + escapeForFileName(table_zk_path);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(host_id);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_name.first);
zookeeper->createIfNotExists(path, "");
path += "/" + escapeForFileName(table_name.second);
zookeeper->create(path, serializePartNamesAndChecksums(part_names_and_checksums), zkutil::CreateMode::Persistent);
}
void BackupCoordinationDistributed::finishPreparing(const String & host_id, const String & error_message)
{
preparing_barrier.finish(host_id, error_message);
}
void BackupCoordinationDistributed::waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const
{
preparing_barrier.waitForAllHostsToFinish(host_ids, timeout);
prepareReplicatedTablesInfo();
}
void BackupCoordinationDistributed::prepareReplicatedTablesInfo() const
{
replicated_tables.emplace();
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_paths";
for (const String & escaped_table_zk_path : zookeeper->getChildren(path))
{
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
for (const String & escaped_data_path : zookeeper->getChildren(path + "/" + escaped_table_zk_path))
{
String data_path = unescapeForFileName(escaped_data_path);
replicated_tables->addDataPath(table_zk_path, data_path);
}
}
path = zookeeper_path + "/repl_tables_parts";
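/// Layout of this subtree: /repl_tables_parts/<table_zk_path>/<host_id>/<database>/<table>,
/// where the leaf node's value is the serialized list of part names and checksums.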
for (const String & escaped_table_zk_path : zookeeper->getChildren(path))
{
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
String path2 = path + "/" + escaped_table_zk_path;
for (const String & escaped_host_id : zookeeper->getChildren(path2))
{
String host_id = unescapeForFileName(escaped_host_id);
String path3 = path2 + "/" + escaped_host_id;
for (const String & escaped_database_name : zookeeper->getChildren(path3))
{
String database_name = unescapeForFileName(escaped_database_name);
String path4 = path3 + "/" + escaped_database_name;
for (const String & escaped_table_name : zookeeper->getChildren(path4))
{
String table_name = unescapeForFileName(escaped_table_name);
String path5 = path4 + "/" + escaped_table_name;
auto part_names_and_checksums = deserializePartNamesAndChecksums(zookeeper->get(path5));
replicated_tables->addPartNames(host_id, {database_name, table_name}, table_zk_path, part_names_and_checksums);
}
}
}
}
replicated_tables->preparePartNamesByLocations();
}
Strings BackupCoordinationDistributed::getReplicatedTableDataPaths(const String & table_zk_path) const
{
return replicated_tables->getDataPaths(table_zk_path);
}
Strings BackupCoordinationDistributed::getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
return replicated_tables->getPartNames(host_id, table_name, table_zk_path);
}
void BackupCoordinationDistributed::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
{
auto zookeeper = get_zookeeper();

View File

@ -1,6 +1,7 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <Common/ZooKeeper/Common.h>
#include <map>
#include <unordered_map>
@ -16,6 +17,19 @@ public:
BackupCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_);
~BackupCoordinationDistributed() override;
void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) override;
void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
void finishPreparing(const String & host_id, const String & error_message) override;
void waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const override;
Strings getReplicatedTableDataPaths(const String & table_zk_path) const override;
Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const override;
void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
void updateFileInfo(const FileInfo & file_info) override;
@ -33,9 +47,12 @@ public:
private:
void createRootNodes();
void removeAllNodes();
void prepareReplicatedTablesInfo() const;
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
BackupCoordinationDistributedBarrier preparing_barrier;
mutable std::optional<BackupCoordinationReplicatedTablesInfo> replicated_tables;
};
}

View File

@ -0,0 +1,416 @@
#include <Backups/BackupCoordinationHelpers.h>
#include <Storages/MergeTree/MergeTreePartInfo.h>
#include <Common/Exception.h>
#include <base/chrono_io.h>
#include <boost/range/adaptor/map.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_BACKUP_TABLE;
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
extern const int LOGICAL_ERROR;
}
struct BackupCoordinationReplicatedTablesInfo::HostAndTableName
{
String host_id;
DatabaseAndTableName table_name;
struct Less
{
bool operator()(const HostAndTableName & lhs, const HostAndTableName & rhs) const
{
return (lhs.host_id < rhs.host_id) || ((lhs.host_id == rhs.host_id) && (lhs.table_name < rhs.table_name));
}
bool operator()(const std::shared_ptr<const HostAndTableName> & lhs, const std::shared_ptr<const HostAndTableName> & rhs) const
{
return operator()(*lhs, *rhs);
}
};
};
class BackupCoordinationReplicatedTablesInfo::CoveredPartsFinder
{
public:
CoveredPartsFinder() = default;
void addPart(const String & new_part_name, const std::shared_ptr<const HostAndTableName> & host_and_table_name)
{
addPart(MergeTreePartInfo::fromPartName(new_part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING), host_and_table_name);
}
void addPart(MergeTreePartInfo && new_part_info, const std::shared_ptr<const HostAndTableName> & host_and_table_name)
{
auto new_min_block = new_part_info.min_block;
auto new_max_block = new_part_info.max_block;
auto & parts = partitions[new_part_info.partition_id];
/// Find the first part with max_block >= `part_info.min_block`.
auto first_it = parts.lower_bound(new_min_block);
if (first_it == parts.end())
{
/// All max_blocks < part_info.min_block, so we can safely add the `part_info` to the list of parts.
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
return;
}
{
/// part_info.min_block <= current_info.max_block
const auto & part = first_it->second;
if (new_max_block < part.info.min_block)
{
/// (prev_info.max_block < part_info.min_block) AND (part_info.max_block < current_info.min_block),
/// so we can safely add the `part_info` to the list of parts.
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
return;
}
/// (part_info.min_block <= current_info.max_block) AND (part_info.max_block >= current_info.min_block), parts intersect.
if (part.info.contains(new_part_info))
{
/// `part_info` is already contained in another part.
return;
}
}
/// Probably `part_info` is going to replace multiple parts, find the range of parts to replace.
auto last_it = first_it;
while (last_it != parts.end())
{
const auto & part = last_it->second;
if (part.info.min_block > new_max_block)
break;
if (!new_part_info.contains(part.info))
{
throw Exception(
ErrorCodes::CANNOT_BACKUP_TABLE,
"Intersected parts detected: {} in the table {}.{}{} and {} in the table {}.{}{}. It should be investigated",
part.info.getPartName(),
part.host_and_table_name->table_name.first,
part.host_and_table_name->table_name.second,
part.host_and_table_name->host_id.empty() ? "" : (" on the host " + part.host_and_table_name->host_id),
new_part_info.getPartName(),
host_and_table_name->table_name.first,
host_and_table_name->table_name.second,
host_and_table_name->host_id.empty() ? "" : (" on the host " + host_and_table_name->host_id));
}
++last_it;
}
/// `part_info` will replace multiple parts [first_it..last_it)
parts.erase(first_it, last_it);
parts.emplace(new_max_block, PartInfo{std::move(new_part_info), host_and_table_name});
}
bool isCoveredByAnotherPart(const String & part_name) const
{
return isCoveredByAnotherPart(MergeTreePartInfo::fromPartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING));
}
bool isCoveredByAnotherPart(const MergeTreePartInfo & part_info) const
{
auto partition_it = partitions.find(part_info.partition_id);
if (partition_it == partitions.end())
return false;
const auto & parts = partition_it->second;
/// Find the first part with max_block >= `part_info.min_block`.
auto it_part = parts.lower_bound(part_info.min_block);
if (it_part == parts.end())
{
/// All max_blocks < part_info.min_block, so there are no parts covering `part_info`.
return false;
}
/// part_info.min_block <= current_info.max_block
const auto & existing_part = it_part->second;
if (part_info.max_block < existing_part.info.min_block)
{
/// (prev_info.max_block < part_info.min_block) AND (part_info.max_block < current_info.min_block),
/// so there are no parts covering `part_info`.
return false;
}
/// (part_info.min_block <= current_info.max_block) AND (part_info.max_block >= current_info.min_block), parts intersect.
if (existing_part.info == part_info)
{
/// It's the same part. In a sense it covers itself, but this function checks whether a part is covered by another part.
return false;
}
/// Check if `part_info` is covered by `current_info`.
return existing_part.info.contains(part_info);
}
private:
struct PartInfo
{
MergeTreePartInfo info;
std::shared_ptr<const HostAndTableName> host_and_table_name;
};
using Parts = std::map<Int64 /* max_block */, PartInfo>;
std::unordered_map<String, Parts> partitions;
};
void BackupCoordinationReplicatedTablesInfo::addDataPath(const String & table_zk_path, const String & table_data_path)
{
tables[table_zk_path].data_paths.push_back(table_data_path);
}
Strings BackupCoordinationReplicatedTablesInfo::getDataPaths(const String & table_zk_path) const
{
auto it = tables.find(table_zk_path);
if (it == tables.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "getDataPaths() called for unknown table_zk_path: {}", table_zk_path);
const auto & replicated_table = it->second;
return replicated_table.data_paths;
}
void BackupCoordinationReplicatedTablesInfo::addPartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
auto & table = tables[table_zk_path];
auto & part_locations_by_names = table.part_locations_by_names;
auto host_and_table_name = std::make_shared<HostAndTableName>();
host_and_table_name->host_id = host_id;
host_and_table_name->table_name = table_name;
for (const auto & part_name_and_checksum : part_names_and_checksums)
{
const auto & part_name = part_name_and_checksum.part_name;
const auto & checksum = part_name_and_checksum.checksum;
auto it = part_locations_by_names.find(part_name);
if (it == part_locations_by_names.end())
{
it = part_locations_by_names.emplace(part_name, PartLocations{}).first;
it->second.checksum = checksum;
}
else
{
const auto & existing = it->second;
if (existing.checksum != checksum)
{
const auto & existing_host_and_table_name = **existing.host_and_table_names.begin();
throw Exception(
ErrorCodes::CANNOT_BACKUP_TABLE,
"Table {}.{} has part {} which is different from the part of table {}.{}. Must be the same",
table_name.first,
table_name.second,
part_name,
existing_host_and_table_name.table_name.first,
existing_host_and_table_name.table_name.second);
}
}
auto & host_and_table_names = it->second.host_and_table_names;
/// `host_and_table_names` should be ordered because we need this vector to be in the same order on every replica.
host_and_table_names.insert(
std::upper_bound(host_and_table_names.begin(), host_and_table_names.end(), host_and_table_name, HostAndTableName::Less{}),
host_and_table_name);
}
}
Strings BackupCoordinationReplicatedTablesInfo::getPartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
if (!part_names_by_locations_prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "preparePartNamesByLocations() was not called before getPartNames()");
auto it = tables.find(table_zk_path);
if (it == tables.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "getPartNames() called for unknown table_zk_path: {}", table_zk_path);
const auto & table = it->second;
auto it2 = table.part_names_by_locations.find(host_id);
if (it2 == table.part_names_by_locations.end())
return {};
const auto & part_names_by_host_id = it2->second;
auto it3 = part_names_by_host_id.find(table_name);
if (it3 == part_names_by_host_id.end())
return {};
return it3->second;
}
void BackupCoordinationReplicatedTablesInfo::preparePartNamesByLocations()
{
if (part_names_by_locations_prepared)
return;
part_names_by_locations_prepared = true;
size_t counter = 0;
for (auto & table : tables | boost::adaptors::map_values)
{
CoveredPartsFinder covered_parts_finder;
for (const auto & [part_name, part_locations] : table.part_locations_by_names)
covered_parts_finder.addPart(part_name, *part_locations.host_and_table_names.begin());
table.part_names_by_locations.clear();
for (const auto & [part_name, part_locations] : table.part_locations_by_names)
{
if (covered_parts_finder.isCoveredByAnotherPart(part_name))
continue;
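/// Spread the remaining (non-covered) parts over their owners in round-robin fashion,
/// so each part is backed up by exactly one replica.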
size_t chosen_index = (counter++) % part_locations.host_and_table_names.size();
const auto & chosen_host_id = part_locations.host_and_table_names[chosen_index]->host_id;
const auto & chosen_table_name = part_locations.host_and_table_names[chosen_index]->table_name;
table.part_names_by_locations[chosen_host_id][chosen_table_name].push_back(part_name);
}
}
}
BackupCoordinationDistributedBarrier::BackupCoordinationDistributedBarrier(
const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, const String & logger_name_, const String & operation_name_)
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, log(&Poco::Logger::get(logger_name_))
, operation_name(operation_name_)
{
createRootNodes();
}
void BackupCoordinationDistributedBarrier::createRootNodes()
{
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
}
void BackupCoordinationDistributedBarrier::finish(const String & host_id, const String & error_message)
{
if (error_message.empty())
LOG_TRACE(log, "Host {} has finished {}", host_id, operation_name);
else
LOG_ERROR(log, "Host {} has failed {} with message: {}", host_id, operation_name, error_message);
auto zookeeper = get_zookeeper();
if (error_message.empty())
zookeeper->create(zookeeper_path + "/" + host_id + ":ready", "", zkutil::CreateMode::Persistent);
else
zookeeper->create(zookeeper_path + "/" + host_id + ":error", error_message, zkutil::CreateMode::Persistent);
}
void BackupCoordinationDistributedBarrier::waitForAllHostsToFinish(const Strings & host_ids, const std::chrono::seconds timeout) const
{
auto zookeeper = get_zookeeper();
bool all_hosts_ready = false;
String not_ready_host_id;
String error_host_id;
String error_message;
/// Checks the listed nodes: sets `all_hosts_ready` if every host has reported, otherwise records the not-ready or failed host.
auto process_nodes = [&](const Strings & nodes)
{
std::unordered_set<std::string_view> set{nodes.begin(), nodes.end()};
for (const String & host_id : host_ids)
{
if (set.contains(host_id + ":error"))
{
error_host_id = host_id;
error_message = zookeeper->get(zookeeper_path + "/" + host_id + ":error");
return;
}
if (!set.contains(host_id + ":ready"))
{
LOG_TRACE(log, "Waiting for host {} {}", host_id, operation_name);
not_ready_host_id = host_id;
return;
}
}
all_hosts_ready = true;
};
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getChildrenWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout.count() >= 0);
std::chrono::steady_clock::duration time_left = timeout;
std::mutex dummy_mutex;
while (true)
{
if (use_timeout && (time_left.count() <= 0))
{
Strings children = zookeeper->getChildren(zookeeper_path);
process_nodes(children);
break;
}
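/// Re-list the barrier's children, arming a watch so we wake up as soon as another host reports.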
watch_set = true;
Strings children = zookeeper->getChildrenWatch(zookeeper_path, nullptr, watch_callback);
process_nodes(children);
if (!error_message.empty() || all_hosts_ready)
break;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
if (!watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered))
break;
time_left -= (std::chrono::steady_clock::now() - start_time);
}
else
watch_triggered_event.wait(dummy_lock, watch_triggered);
}
}
if (watch_set)
{
/// Remove watch by triggering it.
zookeeper->create(zookeeper_path + "/remove_watch-", "", zkutil::CreateMode::EphemeralSequential);
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout, watch_triggered);
}
if (!error_message.empty())
{
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} failed {} with message: {}",
error_host_id,
operation_name,
error_message);
}
if (all_hosts_ready)
{
LOG_TRACE(log, "All hosts have finished {}", operation_name);
return;
}
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} has failed {}: Time ({}) is out",
not_ready_host_id,
operation_name,
to_string(timeout));
}
}

View File

@ -0,0 +1,90 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Common/ZooKeeper/Common.h>
#include <map>
#include <unordered_map>
namespace DB
{
/// Helper designed to be used in an implementation of the IBackupCoordination interface in the part related to replicated tables.
class BackupCoordinationReplicatedTablesInfo
{
public:
BackupCoordinationReplicatedTablesInfo() = default;
/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function, and then all the added paths can be returned by calling
/// getReplicatedTableDataPaths().
void addDataPath(const String & table_zk_path, const String & table_data_path);
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedTableDataPath()).
Strings getDataPaths(const String & table_zk_path) const;
using PartNameAndChecksum = IBackupCoordination::PartNameAndChecksum;
/// Adds part names which a specified replica of a replicated table is going to put to the backup.
/// Multiple replicas of the replicated table call this function, and then the added part names can be returned by calling
/// getReplicatedTablePartNames().
/// Checksums are used only to control that parts under the same names on different replicas are the same.
void addPartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums);
void preparePartNamesByLocations();
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as was added by calling addReplicatedTablePartNames(), but without duplicates and without
/// parts covered by other parts.
Strings getPartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const;
private:
class CoveredPartsFinder;
struct HostAndTableName;
struct PartLocations
{
std::vector<std::shared_ptr<const HostAndTableName>> host_and_table_names;
UInt128 checksum;
};
struct TableInfo
{
Strings data_paths;
std::map<String /* part_name */, PartLocations> part_locations_by_names; /// Should be ordered because we need this map to be in the same order on every replica.
std::unordered_map<String /* host_id */, std::map<DatabaseAndTableName, Strings /* part_names */>> part_names_by_locations;
};
std::unordered_map<String /* zk_path */, TableInfo> tables;
bool part_names_by_locations_prepared = false;
};
/// Helper designed to be used in the implementation of the BackupCoordinationDistributed and RestoreCoordinationDistributed classes
/// to implement synchronization when we need all hosts to finish a specific task and then continue.
class BackupCoordinationDistributedBarrier
{
public:
BackupCoordinationDistributedBarrier(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_, const String & logger_name_, const String & operation_name_);
/// Sets that a specified host has finished the specific task, successfully or with an error.
/// In the latter case `error_message` should be set.
void finish(const String & host_id, const String & error_message = {});
/// Waits for a specified list of hosts to finish the specific task.
void waitForAllHostsToFinish(const Strings & host_ids, const std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const;
private:
void createRootNodes();
String zookeeper_path;
zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
String operation_name;
};
}

View File

@ -1,15 +1,59 @@
#include <Backups/BackupCoordinationLocal.h>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <fmt/format.h>
namespace DB
{
using SizeAndChecksum = IBackupCoordination::SizeAndChecksum;
using FileInfo = IBackupCoordination::FileInfo;
BackupCoordinationLocal::BackupCoordinationLocal() = default;
BackupCoordinationLocal::BackupCoordinationLocal() : log(&Poco::Logger::get("BackupCoordination"))
{
}
BackupCoordinationLocal::~BackupCoordinationLocal() = default;
void BackupCoordinationLocal::addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path)
{
std::lock_guard lock{mutex};
replicated_tables.addDataPath(table_zk_path, table_data_path);
}
void BackupCoordinationLocal::addReplicatedTablePartNames(const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
std::lock_guard lock{mutex};
replicated_tables.addPartNames("", table_name, table_zk_path, part_names_and_checksums);
}
void BackupCoordinationLocal::finishPreparing(const String & /* host_id */, const String & error_message)
{
LOG_TRACE(log, "Finished preparing{}", (error_message.empty() ? "" : (" with error " + error_message)));
if (!error_message.empty())
return;
replicated_tables.preparePartNamesByLocations();
}
void BackupCoordinationLocal::waitForAllHostsPrepared(const Strings & /* host_ids */, std::chrono::seconds /* timeout */) const
{
}
Strings BackupCoordinationLocal::getReplicatedTableDataPaths(const String & table_zk_path) const
{
std::lock_guard lock{mutex};
return replicated_tables.getDataPaths(table_zk_path);
}
Strings BackupCoordinationLocal::getReplicatedTablePartNames(const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path) const
{
std::lock_guard lock{mutex};
return replicated_tables.getPartNames("", table_name, table_zk_path);
}
void BackupCoordinationLocal::addFileInfo(const FileInfo & file_info, bool & is_data_file_required)
{
std::lock_guard lock{mutex};

View File

@ -1,10 +1,13 @@
#pragma once
#include <Backups/IBackupCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <map>
#include <mutex>
namespace Poco { class Logger; }
namespace DB
{
@ -15,6 +18,19 @@ public:
BackupCoordinationLocal();
~BackupCoordinationLocal() override;
void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) override;
void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
void finishPreparing(const String & host_id, const String & error_message) override;
void waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout) const override;
Strings getReplicatedTableDataPaths(const String & table_zk_path) const override;
Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const override;
void addFileInfo(const FileInfo & file_info, bool & is_data_file_required) override;
void updateFileInfo(const FileInfo & file_info) override;
@ -30,10 +46,14 @@ public:
private:
mutable std::mutex mutex;
BackupCoordinationReplicatedTablesInfo replicated_tables;
std::map<String /* file_name */, SizeAndChecksum> file_names; /// Should be ordered alphabetically, see listFiles(). For empty files we assume checksum = 0.
std::map<SizeAndChecksum, FileInfo> file_infos; /// Information about files. Without empty files.
Strings archive_suffixes;
size_t current_archive_suffix = 0;
const Poco::Logger * log;
};
}

View File

@ -12,6 +12,7 @@
namespace DB
{
class IBackupCoordination;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
@ -32,7 +33,7 @@ public:
String password;
ContextPtr context;
bool is_internal_backup = false;
String coordination_zk_path;
std::shared_ptr<IBackupCoordination> backup_coordination;
};
static BackupFactory & instance();

View File

@ -125,11 +125,10 @@ BackupImpl::BackupImpl(
, reader(std::move(reader_))
, is_internal_backup(false)
, coordination(std::make_shared<BackupCoordinationLocal>())
, context(context_)
, version(INITIAL_BACKUP_VERSION)
, base_backup_info(base_backup_info_)
{
open();
open(context_);
}
@ -141,24 +140,20 @@ BackupImpl::BackupImpl(
const ContextPtr & context_,
const std::optional<UUID> & backup_uuid_,
bool is_internal_backup_,
const String & coordination_zk_path_)
const std::shared_ptr<IBackupCoordination> & coordination_)
: backup_name(backup_name_)
, archive_params(archive_params_)
, use_archives(!archive_params.archive_name.empty())
, open_mode(OpenMode::WRITE)
, writer(std::move(writer_))
, is_internal_backup(is_internal_backup_)
, context(context_)
, coordination(coordination_ ? coordination_ : std::make_shared<BackupCoordinationLocal>())
, uuid(backup_uuid_)
, version(CURRENT_BACKUP_VERSION)
, base_backup_info(base_backup_info_)
, log(&Poco::Logger::get("Backup"))
{
if (coordination_zk_path_.empty())
coordination = std::make_shared<BackupCoordinationLocal>();
else
coordination = std::make_shared<BackupCoordinationDistributed>(coordination_zk_path_, [&] { return context->getZooKeeper(); });
open();
open(context_);
}
@ -168,7 +163,7 @@ BackupImpl::~BackupImpl()
}
void BackupImpl::open()
void BackupImpl::open(const ContextPtr & context)
{
std::lock_guard lock{mutex};
@ -224,17 +219,21 @@ void BackupImpl::close()
std::lock_guard lock{mutex};
if (!is_internal_backup && writing_finalized)
{
LOG_TRACE(log, "Finalizing backup {}", backup_name);
writeBackupMetadata();
LOG_INFO(log, "Finalized backup {}", backup_name);
}
archive_readers.clear();
for (auto & archive_writer : archive_writers)
archive_writer = {"", nullptr};
if (!is_internal_backup && writer && !writing_finalized)
{
LOG_INFO(log, "Removing all files of backup {} after failure", backup_name);
removeAllFilesAfterFailure();
if (!is_internal_backup)
coordination->drop();
}
}
time_t BackupImpl::getTimestamp() const

View File

@ -49,7 +49,7 @@ public:
const ContextPtr & context_,
const std::optional<UUID> & backup_uuid_ = {},
bool is_internal_backup_ = false,
const String & coordination_zk_path_ = {});
const std::shared_ptr<IBackupCoordination> & coordination_ = {});
~BackupImpl() override;
@ -73,7 +73,7 @@ private:
using FileInfo = IBackupCoordination::FileInfo;
class BackupEntryFromBackupImpl;
void open();
void open(const ContextPtr & context);
void close();
void writeBackupMetadata();
void readBackupMetadata();
@ -90,7 +90,6 @@ private:
std::shared_ptr<IBackupReader> reader;
const bool is_internal_backup;
std::shared_ptr<IBackupCoordination> coordination;
ContextPtr context;
mutable std::mutex mutex;
std::optional<UUID> uuid;
@ -103,6 +102,7 @@ private:
std::pair<String, std::shared_ptr<IArchiveWriter>> archive_writers[2];
String current_archive_suffix;
bool writing_finalized = false;
const Poco::Logger * log;
};
}

View File

@ -24,7 +24,6 @@ namespace ErrorCodes
M(Bool, async) \
M(UInt64, shard_num) \
M(UInt64, replica_num) \
M(Bool, allow_storing_multiple_replicas) \
M(Bool, internal) \
M(String, host_id) \
M(String, coordination_zk_path)

View File

@ -11,7 +11,8 @@ class ASTBackupQuery;
/// Settings specified in the "SETTINGS" clause of a BACKUP query.
struct BackupSettings
{
/// Base backup, if it's set an incremental backup will be built.
/// Base backup, if it's set an incremental backup will be built. That means only differences made after the base backup will be put
/// into a new backup.
std::optional<BackupInfo> base_backup_info;
/// Compression method and level for writing the backup (when applicable).
@ -36,9 +37,6 @@ struct BackupSettings
/// Can only be used with BACKUP ON CLUSTER.
size_t replica_num = 0;
/// Allows storing in the backup of multiple replicas.
bool allow_storing_multiple_replicas = false;
/// Internal, should not be specified by user.
/// Whether this backup is a part of a distributed backup created by BACKUP ON CLUSTER.
bool internal = false;

View File

@ -4,16 +4,18 @@
#include <Backups/DDLCompareUtils.h>
#include <Backups/DDLRenamingVisitor.h>
#include <Backups/IBackup.h>
#include <Backups/IBackupCoordination.h>
#include <Backups/formatTableNameOrTemporaryTableName.h>
#include <Backups/replaceTableUUIDWithMacroInReplicatedTableDef.h>
#include <Common/escapeForFileName.h>
#include <Access/Common/AccessFlags.h>
#include <Access/Common/AccessRightsElement.h>
#include <Databases/IDatabase.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <Storages/StorageReplicatedMergeTree.h>
namespace DB
@ -98,13 +100,57 @@ namespace
class BackupEntriesBuilder
{
public:
BackupEntriesBuilder(const ContextPtr & context_, const BackupSettings & backup_settings_)
: context(context_), backup_settings(backup_settings_)
BackupEntriesBuilder(const ContextPtr & context_, const BackupSettings & backup_settings_, std::shared_ptr<IBackupCoordination> backup_coordination_)
: context(context_), backup_settings(backup_settings_), backup_coordination(backup_coordination_)
{
}
/// Prepares internal structures for making backup entries.
void prepare(const ASTBackupQuery::Elements & elements)
void prepare(const ASTBackupQuery::Elements & elements, std::chrono::seconds timeout_for_other_nodes_to_prepare)
{
try
{
prepareImpl(elements);
}
catch (...)
{
backup_coordination->finishPreparing(backup_settings.host_id, getCurrentExceptionMessage(false));
throw;
}
/// We've finished preparing, now we will wait for other replicas and shards to finish too.
/// We need this waiting because we're going to call some functions which require data collected from other nodes too,
/// see IRestoreCoordination::checkTablesNotExistedInReplicatedDBs(), IRestoreCoordination::getReplicatedTableDataPath().
backup_coordination->finishPreparing(backup_settings.host_id);
backup_coordination->waitForAllHostsPrepared(
BackupSettings::Util::filterHostIDs(
backup_settings.cluster_host_ids, backup_settings.shard_num, backup_settings.replica_num),
timeout_for_other_nodes_to_prepare);
}
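For illustration, here is a minimal in-memory sketch of the finish/wait barrier used during preparation. The real coordination goes through ZooKeeper; the names HostBarrier, finish() and waitForAll() are hypothetical stand-ins:
#include <chrono>
#include <condition_variable>
#include <map>
#include <mutex>
#include <stdexcept>
#include <string>
#include <vector>
class HostBarrier
{
public:
    /// Marks `host_id` as finished; a non-empty `error` marks it as failed.
    void finish(const std::string & host_id, const std::string & error = {})
    {
        std::lock_guard lock{mutex};
        statuses[host_id] = error;
        changed.notify_all();
    }
    /// Blocks until all listed hosts have finished, throwing on failure or timeout.
    void waitForAll(const std::vector<std::string> & hosts, std::chrono::seconds timeout)
    {
        std::unique_lock lock{mutex};
        bool ok = changed.wait_for(lock, timeout, [&]
        {
            for (const auto & host : hosts)
            {
                auto it = statuses.find(host);
                if (it == statuses.end())
                    return false;               /// Still preparing, keep waiting.
                if (!it->second.empty())
                    throw std::runtime_error("Host " + host + " failed: " + it->second);
            }
            return true;                        /// Everyone finished successfully.
        });
        if (!ok)
            throw std::runtime_error("Timed out waiting for other hosts");
    }
private:
    std::mutex mutex;
    std::condition_variable changed;
    std::map<std::string, std::string> statuses; /// host_id -> error message ("" means success).
};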
/// Makes backup entries, should be called after prepare().
BackupEntries makeBackupEntries() const
{
BackupEntries res;
for (const auto & info : databases | boost::adaptors::map_values)
res.push_back(makeBackupEntryForMetadata(*info.create_query));
for (const auto & info : tables | boost::adaptors::map_values)
{
res.push_back(makeBackupEntryForMetadata(*info.create_query));
appendBackupEntriesForData(res, info);
}
/// A backup cannot be empty.
if (res.empty())
throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY);
return res;
}
private:
void prepareImpl(const ASTBackupQuery::Elements & elements)
{
calculateShardNumAndReplicaNumInBackup();
renaming_settings.setFromBackupQuery(elements);
@ -135,36 +181,6 @@ namespace
}
}
/// Makes backup entries, should be called after prepare().
BackupEntries makeBackupEntries() const
{
BackupEntries res;
for (const auto & info : databases | boost::adaptors::map_values)
res.push_back(makeBackupEntryForMetadata(*info.create_query));
for (const auto & info : tables | boost::adaptors::map_values)
{
res.push_back(makeBackupEntryForMetadata(*info.create_query));
if (info.has_data)
{
auto data_backup = info.storage->backupData(context, info.partitions);
if (!data_backup.empty())
{
String data_path = PathsInBackup::getDataPath(*info.create_query, shard_num_in_backup, replica_num_in_backup);
for (auto & [path_in_backup, backup_entry] : data_backup)
res.emplace_back(data_path + path_in_backup, std::move(backup_entry));
}
}
}
/// A backup cannot be empty.
if (res.empty())
throw Exception("Backup must not be empty", ErrorCodes::BACKUP_IS_EMPTY);
return res;
}
private:
void calculateShardNumAndReplicaNumInBackup()
{
size_t shard_num = 0;
@ -187,8 +203,6 @@ namespace
void prepareToBackupTable(const DatabaseAndTableName & table_name_, const DatabaseAndTable & table_, const ASTs & partitions_)
{
context->checkAccess(AccessType::SHOW_TABLES, table_name_.first, table_name_.second);
const auto & database = table_.first;
const auto & storage = table_.second;
@ -206,22 +220,72 @@ namespace
/// Make a create query for this table.
auto create_query = prepareCreateQueryForBackup(database->getCreateTableQuery(table_name_.second, context));
String data_path = PathsInBackup::getDataPath(*create_query, shard_num_in_backup, replica_num_in_backup);
bool has_data = storage->hasDataToBackup() && !backup_settings.structure_only;
if (has_data)
{
/// We check for SELECT privilege only if we're going to read data from the table.
context->checkAccess(AccessType::SELECT, table_name_.first, table_name_.second);
}
String zk_path;
BackupEntries data = prepareToBackupTableData(table_name_, storage, partitions_, data_path, zk_path);
CreateTableInfo info;
TableInfo info;
info.table_name = table_name_;
info.create_query = create_query;
info.storage = storage;
info.partitions = partitions_;
info.has_data = has_data;
info.data = std::move(data);
info.data_path = std::move(data_path);
info.zk_path = std::move(zk_path);
tables[name_in_backup] = std::move(info);
}
BackupEntries prepareToBackupTableData(const DatabaseAndTableName & table_name_, const StoragePtr & storage_, const ASTs & partitions_, const String & data_path, String & zk_path)
{
zk_path.clear();
const StorageReplicatedMergeTree * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage_.get());
bool has_data = (storage_->hasDataToBackup() || replicated_table) && !backup_settings.structure_only;
if (!has_data)
return {};
BackupEntries data = storage_->backupData(context, partitions_);
if (!replicated_table)
return data;
zk_path = replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath();
backup_coordination->addReplicatedTableDataPath(zk_path, data_path);
std::unordered_map<String, SipHash> parts;
for (const auto & [relative_path, backup_entry] : data)
{
size_t slash_pos = relative_path.find('/');
if (slash_pos != String::npos)
{
String part_name = relative_path.substr(0, slash_pos);
if (MergeTreePartInfo::tryParsePartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING))
{
auto & hash = parts[part_name];
if (relative_path.ends_with(".bin"))
{
auto checksum = backup_entry->getChecksum();
hash.update(relative_path);
hash.update(backup_entry->getSize());
hash.update(*checksum);
}
}
}
}
std::vector<IBackupCoordination::PartNameAndChecksum> part_names_and_checksums;
part_names_and_checksums.reserve(parts.size());
for (auto & [part_name, hash] : parts)
{
UInt128 checksum;
hash.get128(checksum);
auto & part_name_and_checksum = part_names_and_checksums.emplace_back();
part_name_and_checksum.part_name = part_name;
part_name_and_checksum.checksum = checksum;
}
backup_coordination->addReplicatedTablePartNames(backup_settings.host_id, table_name_, zk_path, part_names_and_checksums);
return data;
}
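The per-part checksum logic above can be summarized with a small sketch: std::hash stands in for SipHash, EntryInfo is a stand-in for IBackupEntry, and the part-name validity check (MergeTreePartInfo::tryParsePartName) is omitted, so every path prefix counts as a part name:
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
struct EntryInfo { std::uint64_t size = 0; std::uint64_t checksum = 0; };
using Entries = std::vector<std::pair<std::string, EntryInfo>>;
/// Combines one value into a running hash (a stand-in for SipHash::update).
static void combine(std::uint64_t & hash, std::uint64_t value)
{
    hash ^= value + 0x9e3779b97f4a7c15ULL + (hash << 6) + (hash >> 2);
}
/// Returns part name -> fingerprint over the part's .bin entries.
std::unordered_map<std::string, std::uint64_t> hashPartEntries(const Entries & data)
{
    std::unordered_map<std::string, std::uint64_t> parts;
    for (const auto & [relative_path, entry] : data)
    {
        auto slash_pos = relative_path.find('/');
        if (slash_pos == std::string::npos)
            continue;                                /// Not inside a part directory.
        std::string part_name = relative_path.substr(0, slash_pos);
        auto & hash = parts[part_name];              /// Registers the part on first use.
        if (!relative_path.ends_with(".bin"))
            continue;                                /// Only .bin files feed the hash.
        combine(hash, std::hash<std::string>{}(relative_path));
        combine(hash, entry.size);
        combine(hash, entry.checksum);
    }
    return parts;
}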
/// Prepares to back up a database and all tables in it.
void prepareToBackupDatabase(const String & database_name_, const std::set<String> & except_list_)
{
@ -231,8 +295,6 @@ namespace
void prepareToBackupDatabase(const String & database_name_, const DatabasePtr & database_, const std::set<String> & except_list_)
{
context->checkAccess(AccessType::SHOW_DATABASES, database_name_);
/// Check that we are not trying to back up the same database twice.
String name_in_backup = renaming_settings.getNewDatabaseName(database_name_);
if (databases.contains(name_in_backup))
@ -244,7 +306,7 @@ namespace
/// Make a create query for this database.
auto create_query = prepareCreateQueryForBackup(database_->getCreateDatabaseQuery());
CreateDatabaseInfo info;
DatabaseInfo info;
info.create_query = create_query;
databases[name_in_backup] = std::move(info);
}
@ -298,36 +360,77 @@ namespace
return {metadata_path, std::move(metadata_entry)};
}
/// Information about a table which is going to be put to the backup.
struct CreateTableInfo
struct TableInfo;
void appendBackupEntriesForData(BackupEntries & res, const TableInfo & info) const
{
if (info.zk_path.empty())
{
for (auto & [relative_path, backup_entry] : info.data)
res.emplace_back(info.data_path + relative_path, backup_entry);
return;
}
Strings data_paths = backup_coordination->getReplicatedTableDataPaths(info.zk_path);
Strings part_names = backup_coordination->getReplicatedTablePartNames(backup_settings.host_id, info.table_name, info.zk_path);
std::unordered_set<std::string_view> part_names_set{part_names.begin(), part_names.end()};
for (auto & [relative_path, backup_entry] : info.data)
{
size_t slash_pos = relative_path.find('/');
if (slash_pos != String::npos)
{
String part_name = relative_path.substr(0, slash_pos);
if (MergeTreePartInfo::tryParsePartName(part_name, MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING))
{
if (!part_names_set.contains(part_name))
continue;
for (const auto & data_path : data_paths)
res.emplace_back(data_path + relative_path, backup_entry);
continue;
}
}
res.emplace_back(info.data_path + relative_path, backup_entry);
}
}
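A simplified sketch of that fan-out, with stand-in types: entries belonging to a part selected for this replica are written once under every registered replica data path, other entries go under this replica's own path (looksLikePartName replaces MergeTreePartInfo::tryParsePartName):
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
using Entry = std::pair<std::string, int>;            /// (relative path, payload id)
/// Stand-in for MergeTreePartInfo::tryParsePartName.
static bool looksLikePartName(const std::string & name) { return !name.empty(); }
std::vector<Entry> fanOutEntries(
    const std::vector<Entry> & data,
    const std::string & own_data_path,
    const std::vector<std::string> & all_replica_paths,
    const std::unordered_set<std::string> & parts_for_this_replica)
{
    std::vector<Entry> res;
    for (const auto & [relative_path, payload] : data)
    {
        auto slash_pos = relative_path.find('/');
        if (slash_pos != std::string::npos)
        {
            std::string part_name = relative_path.substr(0, slash_pos);
            if (looksLikePartName(part_name))
            {
                if (!parts_for_this_replica.contains(part_name))
                    continue;                          /// Another replica provides this part.
                for (const auto & path : all_replica_paths)
                    res.emplace_back(path + relative_path, payload);
                continue;
            }
        }
        res.emplace_back(own_data_path + relative_path, payload);  /// Non-part entry.
    }
    return res;
}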
/// Information about a table which is going to be put to the backup.
struct TableInfo
{
DatabaseAndTableName table_name;
ASTPtr create_query;
StoragePtr storage;
ASTs partitions;
bool has_data = false;
BackupEntries data;
String data_path;
String zk_path;
};
/// Information about a database which is going to be put to the backup.
struct CreateDatabaseInfo
struct DatabaseInfo
{
ASTPtr create_query;
};
ContextPtr context;
BackupSettings backup_settings;
std::shared_ptr<IBackupCoordination> backup_coordination;
size_t shard_num_in_backup = 0;
size_t replica_num_in_backup = 0;
DDLRenamingSettings renaming_settings;
std::unordered_map<String /* db_name_in_backup */, CreateDatabaseInfo> databases;
std::map<DatabaseAndTableName /* table_name_in_backup */, CreateTableInfo> tables;
std::unordered_map<String /* db_name_in_backup */, DatabaseInfo> databases;
std::map<DatabaseAndTableName /* table_name_in_backup */, TableInfo> tables;
};
}
BackupEntries makeBackupEntries(const ContextPtr & context, const Elements & elements, const BackupSettings & backup_settings)
BackupEntries makeBackupEntries(
const ContextPtr & context,
const Elements & elements,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
std::chrono::seconds timeout_for_other_nodes_to_prepare)
{
BackupEntriesBuilder builder{context, backup_settings};
builder.prepare(elements);
BackupEntriesBuilder builder{context, backup_settings, backup_coordination};
builder.prepare(elements, timeout_for_other_nodes_to_prepare);
return builder.makeBackupEntries();
}
@ -400,4 +503,48 @@ void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries
backup->finalizeWriting();
}
/// Returns access required to execute BACKUP query.
AccessRightsElements getRequiredAccessToBackup(const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings)
{
AccessRightsElements required_access;
for (const auto & element : elements)
{
switch (element.type)
{
case ASTBackupQuery::TABLE:
{
if (element.is_temp_db)
break;
AccessFlags flags = AccessType::SHOW_TABLES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags, element.name.first, element.name.second);
break;
}
case ASTBackupQuery::DATABASE:
{
if (element.is_temp_db)
break;
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags, element.name.first);
/// TODO: It's better to process `element.except_list` somehow.
break;
}
case ASTBackupQuery::ALL_DATABASES:
{
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (!backup_settings.structure_only)
flags |= AccessType::SELECT;
required_access.emplace_back(flags);
/// TODO: It's better to process `element.except_list` somehow.
break;
}
}
}
return required_access;
}
}

View File

@ -10,16 +10,26 @@ class IBackup;
using BackupPtr = std::shared_ptr<const IBackup>;
using BackupMutablePtr = std::shared_ptr<IBackup>;
class IBackupEntry;
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
using BackupEntries = std::vector<std::pair<String, BackupEntryPtr>>;
struct BackupSettings;
class IBackupCoordination;
class AccessRightsElements;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
/// Prepares backup entries.
BackupEntries makeBackupEntries(const ContextPtr & context, const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings);
BackupEntries makeBackupEntries(
const ContextPtr & context,
const ASTBackupQuery::Elements & elements,
const BackupSettings & backup_settings,
std::shared_ptr<IBackupCoordination> backup_coordination,
std::chrono::seconds timeout_for_other_nodes_to_prepare = std::chrono::seconds::zero());
/// Write backup entries to an opened backup.
void writeBackupEntries(BackupMutablePtr backup, BackupEntries && backup_entries, ThreadPool & thread_pool);
/// Returns access required to execute BACKUP query.
AccessRightsElements getRequiredAccessToBackup(const ASTBackupQuery::Elements & elements, const BackupSettings & backup_settings);
}

View File

@ -4,6 +4,8 @@
#include <Backups/BackupSettings.h>
#include <Backups/BackupUtils.h>
#include <Backups/IBackupEntry.h>
#include <Backups/BackupCoordinationDistributed.h>
#include <Backups/BackupCoordinationLocal.h>
#include <Backups/IRestoreTask.h>
#include <Backups/RestoreCoordinationDistributed.h>
#include <Backups/RestoreCoordinationLocal.h>
@ -21,160 +23,18 @@
namespace DB
{
namespace ErrorCodes
{
extern const int QUERY_IS_PROHIBITED;
extern const int LOGICAL_ERROR;
}
namespace
{
void checkNoMultipleReplicas(const std::vector<Strings> & cluster_host_ids, size_t only_shard_num)
{
if (only_shard_num)
{
if ((only_shard_num <= cluster_host_ids.size()) && (cluster_host_ids[only_shard_num - 1].size() > 1))
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Backup of multiple replicas is disabled. Choose one replica with the replica_num setting or specify allow_storing_multiple_replicas=true");
}
for (const auto & shard : cluster_host_ids)
{
if (shard.size() > 1)
throw Exception(ErrorCodes::QUERY_IS_PROHIBITED, "Backup of multiple replicas is disabled. Choose one replica with the replica_num setting or specify allow_storing_multiple_replicas=true");
}
}
void executeBackupImpl(const ASTBackupQuery & query, const UUID & backup_uuid, const ContextPtr & context, ThreadPool & thread_pool)
{
const auto backup_info = BackupInfo::fromAST(*query.backup_name);
const auto backup_settings = BackupSettings::fromBackupQuery(query);
std::shared_ptr<ASTBackupQuery> new_query = std::static_pointer_cast<ASTBackupQuery>(query.clone());
BackupFactory::CreateParams backup_create_params;
backup_create_params.open_mode = IBackup::OpenMode::WRITE;
backup_create_params.context = context;
backup_create_params.backup_info = backup_info;
backup_create_params.base_backup_info = backup_settings.base_backup_info;
backup_create_params.compression_method = backup_settings.compression_method;
backup_create_params.compression_level = backup_settings.compression_level;
backup_create_params.password = backup_settings.password;
backup_create_params.backup_uuid = backup_uuid;
backup_create_params.is_internal_backup = backup_settings.internal;
backup_create_params.coordination_zk_path = backup_settings.coordination_zk_path;
ClusterPtr cluster;
if (!query.cluster.empty())
{
new_query->cluster = context->getMacros()->expand(query.cluster);
cluster = context->getCluster(new_query->cluster);
auto new_backup_settings = backup_settings;
new_backup_settings.cluster_host_ids = cluster->getHostIDs();
if (!backup_settings.allow_storing_multiple_replicas && !backup_settings.replica_num)
checkNoMultipleReplicas(new_backup_settings.cluster_host_ids, backup_settings.shard_num);
if (backup_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
new_backup_settings.coordination_zk_path
= query.cluster.empty() ? "" : (root_zk_path + "/backup-" + toString(backup_uuid));
backup_create_params.coordination_zk_path = new_backup_settings.coordination_zk_path;
}
new_backup_settings.copySettingsToQuery(*new_query);
}
BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params);
if (!query.cluster.empty())
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = backup_settings.shard_num;
params.only_replica_num = backup_settings.replica_num;
auto res = executeDDLQueryOnCluster(new_query, context, params);
PullingPipelineExecutor executor(res.pipeline);
Block block;
while (executor.pull(block));
backup->finalizeWriting();
}
else
{
new_query->setDatabase(context->getCurrentDatabase());
auto backup_entries = makeBackupEntries(context, new_query->elements, backup_settings);
writeBackupEntries(backup, std::move(backup_entries), thread_pool);
}
}
void executeRestoreImpl(const ASTBackupQuery & query, const UUID & restore_uuid, ContextMutablePtr context, ThreadPool & thread_pool)
{
const auto backup_info = BackupInfo::fromAST(*query.backup_name);
const auto restore_settings = RestoreSettings::fromRestoreQuery(query);
bool is_internal_restore = restore_settings.internal;
std::shared_ptr<IRestoreCoordination> restore_coordination;
SCOPE_EXIT({
if (!is_internal_restore && restore_coordination)
restore_coordination->drop();
});
std::shared_ptr<ASTBackupQuery> new_query = std::static_pointer_cast<ASTBackupQuery>(query.clone());
ClusterPtr cluster;
if (!query.cluster.empty())
{
new_query->cluster = context->getMacros()->expand(query.cluster);
cluster = context->getCluster(new_query->cluster);
auto new_restore_settings = restore_settings;
new_restore_settings.cluster_host_ids = cluster->getHostIDs();
if (new_restore_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
new_restore_settings.coordination_zk_path
= query.cluster.empty() ? "" : (root_zk_path + "/restore-" + toString(restore_uuid));
}
new_restore_settings.copySettingsToQuery(*new_query);
}
if (!restore_settings.coordination_zk_path.empty())
restore_coordination = std::make_shared<RestoreCoordinationDistributed>(restore_settings.coordination_zk_path, [context=context] { return context->getZooKeeper(); });
else
restore_coordination = std::make_shared<RestoreCoordinationLocal>();
if (!query.cluster.empty())
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = restore_settings.shard_num;
params.only_replica_num = restore_settings.replica_num;
auto res = executeDDLQueryOnCluster(new_query, context, params);
PullingPipelineExecutor executor(res.pipeline);
Block block;
while (executor.pull(block));
}
else
{
new_query->setDatabase(context->getCurrentDatabase());
BackupFactory::CreateParams backup_open_params;
backup_open_params.open_mode = IBackup::OpenMode::READ;
backup_open_params.context = context;
backup_open_params.backup_info = backup_info;
backup_open_params.base_backup_info = restore_settings.base_backup_info;
backup_open_params.password = restore_settings.password;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
auto timeout_for_restoring_metadata = std::chrono::seconds{context->getConfigRef().getUInt("backups.restore_metadata_timeout", 0)};
auto restore_tasks = makeRestoreTasks(context, backup, new_query->elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
executeRestoreTasks(std::move(restore_tasks), thread_pool, restore_settings, restore_coordination, timeout_for_restoring_metadata);
}
}
}
BackupsWorker::BackupsWorker(size_t num_backup_threads, size_t num_restore_threads)
: backups_thread_pool(num_backup_threads)
, restores_thread_pool(num_restore_threads)
: backups_thread_pool(num_backup_threads, /* max_free_threads = */ 0, num_backup_threads)
, restores_thread_pool(num_restore_threads, /* max_free_threads = */ 0, num_restore_threads)
, log(&Poco::Logger::get("BackupsWorker"))
{
/// We set max_free_threads = 0 because we don't want to keep any threads if there is no BACKUP or RESTORE query running right now.
}
UUID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutablePtr context)
@ -186,129 +46,320 @@ UUID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutable
return startRestoring(backup_or_restore_query, context);
}
UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & context)
{
UUID uuid = UUIDHelpers::generateV4();
UUID backup_uuid = UUIDHelpers::generateV4();
auto backup_query = std::static_pointer_cast<ASTBackupQuery>(query->clone());
auto backup_info = BackupInfo::fromAST(*backup_query->backup_name);
auto backup_settings = BackupSettings::fromBackupQuery(*backup_query);
BackupInfo backup_info;
BackupSettings backup_settings;
addInfo(backup_uuid, backup_info.toString(), BackupStatus::MAKING_BACKUP, backup_settings.internal);
std::shared_ptr<IBackupCoordination> backup_coordination;
SCOPE_EXIT({
if (backup_coordination && !backup_settings.internal)
backup_coordination->drop();
});
BackupMutablePtr backup;
ContextPtr cloned_context;
bool on_cluster = !backup_query->cluster.empty();
std::shared_ptr<BlockIO> on_cluster_io;
try
{
const ASTBackupQuery & backup_query = typeid_cast<const ASTBackupQuery &>(*query);
backup_info = BackupInfo::fromAST(*backup_query.backup_name);
backup_settings = BackupSettings::fromBackupQuery(backup_query);
auto access_to_check = getRequiredAccessToBackup(backup_query->elements, backup_settings);
if (!on_cluster)
context->checkAccess(access_to_check);
ClusterPtr cluster;
if (on_cluster)
{
backup_query->cluster = context->getMacros()->expand(backup_query->cluster);
cluster = context->getCluster(backup_query->cluster);
backup_settings.cluster_host_ids = cluster->getHostIDs();
if (backup_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
backup_settings.coordination_zk_path = root_zk_path + "/backup-" + toString(backup_uuid);
}
backup_settings.copySettingsToQuery(*backup_query);
}
if (!backup_settings.coordination_zk_path.empty())
backup_coordination = std::make_shared<BackupCoordinationDistributed>(
backup_settings.coordination_zk_path,
[global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); });
else
backup_coordination = std::make_shared<BackupCoordinationLocal>();
BackupFactory::CreateParams backup_create_params;
backup_create_params.open_mode = IBackup::OpenMode::WRITE;
backup_create_params.context = context;
backup_create_params.backup_info = backup_info;
backup_create_params.base_backup_info = backup_settings.base_backup_info;
backup_create_params.compression_method = backup_settings.compression_method;
backup_create_params.compression_level = backup_settings.compression_level;
backup_create_params.password = backup_settings.password;
backup_create_params.backup_uuid = backup_uuid;
backup_create_params.is_internal_backup = backup_settings.internal;
backup_create_params.backup_coordination = backup_coordination;
backup = BackupFactory::instance().createBackup(backup_create_params);
ContextMutablePtr mutable_context;
if (on_cluster || backup_settings.async)
cloned_context = mutable_context = Context::createCopy(context);
else
cloned_context = context; /// No need to clone context
if (on_cluster)
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = backup_settings.shard_num;
params.only_replica_num = backup_settings.replica_num;
params.access_to_check = access_to_check;
mutable_context->setSetting("distributed_ddl_task_timeout", -1); // No timeout
mutable_context->setSetting("distributed_ddl_output_mode", Field{"throw"});
auto res = executeDDLQueryOnCluster(backup_query, mutable_context, params);
on_cluster_io = std::make_shared<BlockIO>(std::move(res));
}
}
catch (...)
{
setStatus(backup_uuid, BackupStatus::FAILED_TO_BACKUP);
throw;
}
{
Info info;
info.uuid = uuid;
info.backup_name = backup_info.toString();
info.status = BackupStatus::MAKING_BACKUP;
info.status_changed_time = time(nullptr);
info.internal = backup_settings.internal;
std::lock_guard lock{infos_mutex};
infos.emplace(uuid, std::move(info));
}
auto job = [this, query, context, uuid]
auto job = [this,
backup,
backup_uuid,
backup_query,
backup_settings,
backup_coordination,
on_cluster_io,
cloned_context](bool in_separate_thread)
{
try
{
const ASTBackupQuery & backup_query = typeid_cast<const ASTBackupQuery &>(*query);
executeBackupImpl(backup_query, uuid, context, backups_thread_pool);
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::BACKUP_COMPLETE;
info.status_changed_time = time(nullptr);
if (on_cluster_io)
{
PullingPipelineExecutor executor(on_cluster_io->pipeline);
Block block;
while (executor.pull(block))
;
backup->finalizeWriting();
}
else
{
std::optional<CurrentThread::QueryScope> query_scope;
if (in_separate_thread)
query_scope.emplace(cloned_context);
backup_query->setDatabase(cloned_context->getCurrentDatabase());
auto timeout_for_preparing = std::chrono::seconds{cloned_context->getConfigRef().getInt("backups.backup_prepare_timeout", -1)};
auto backup_entries
= makeBackupEntries(cloned_context, backup_query->elements, backup_settings, backup_coordination, timeout_for_preparing);
writeBackupEntries(backup, std::move(backup_entries), backups_thread_pool);
}
setStatus(backup_uuid, BackupStatus::BACKUP_COMPLETE);
}
catch (...)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::FAILED_TO_BACKUP;
info.status_changed_time = time(nullptr);
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
setStatus(backup_uuid, BackupStatus::FAILED_TO_BACKUP);
if (!in_separate_thread)
throw;
}
};
if (backup_settings.async)
{
backups_thread_pool.scheduleOrThrowOnError(job);
}
backups_thread_pool.scheduleOrThrowOnError([job] { job(true); });
else
{
job();
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
if (info.status == BackupStatus::FAILED_TO_BACKUP)
std::rethrow_exception(info.exception);
}
job(false);
return uuid;
return backup_uuid;
}
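The scheduling at the end can be read as the following sketch. A detached std::thread stands in for backups_thread_pool.scheduleOrThrowOnError, and only the synchronous path lets exceptions propagate to the caller:
#include <functional>
#include <thread>
/// `in_separate_thread` tells the job whether to set up its own query scope
/// and swallow exceptions (they are recorded via setStatus instead).
void dispatch(bool async, std::function<void(bool /* in_separate_thread */)> job)
{
    if (async)
        std::thread{[job = std::move(job)] { job(true); }}.detach();  /// Pool stand-in.
    else
        job(false);                                   /// Errors propagate to the caller.
}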
UUID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePtr context)
{
UUID uuid = UUIDHelpers::generateV4();
UUID restore_uuid = UUIDHelpers::generateV4();
auto restore_query = std::static_pointer_cast<ASTBackupQuery>(query->clone());
auto backup_info = BackupInfo::fromAST(*restore_query->backup_name);
auto restore_settings = RestoreSettings::fromRestoreQuery(*restore_query);
BackupInfo backup_info;
RestoreSettings restore_settings;
addInfo(restore_uuid, backup_info.toString(), BackupStatus::RESTORING, restore_settings.internal);
std::shared_ptr<IRestoreCoordination> restore_coordination;
SCOPE_EXIT({
if (restore_coordination && !restore_settings.internal)
restore_coordination->drop();
});
ContextMutablePtr cloned_context;
std::shared_ptr<BlockIO> on_cluster_io;
bool on_cluster = !restore_query->cluster.empty();
try
{
const ASTBackupQuery & restore_query = typeid_cast<const ASTBackupQuery &>(*query);
backup_info = BackupInfo::fromAST(*restore_query.backup_name);
restore_settings = RestoreSettings::fromRestoreQuery(restore_query);
auto access_to_check = getRequiredAccessToRestore(restore_query->elements, restore_settings);
if (!on_cluster)
context->checkAccess(access_to_check);
ClusterPtr cluster;
if (on_cluster)
{
restore_query->cluster = context->getMacros()->expand(restore_query->cluster);
cluster = context->getCluster(restore_query->cluster);
restore_settings.cluster_host_ids = cluster->getHostIDs();
if (restore_settings.coordination_zk_path.empty())
{
String root_zk_path = context->getConfigRef().getString("backups.zookeeper_path", "/clickhouse/backups");
restore_settings.coordination_zk_path = root_zk_path + "/restore-" + toString(restore_uuid);
}
restore_settings.copySettingsToQuery(*restore_query);
}
if (!restore_settings.coordination_zk_path.empty())
restore_coordination = std::make_shared<RestoreCoordinationDistributed>(
restore_settings.coordination_zk_path,
[global_context = context->getGlobalContext()] { return global_context->getZooKeeper(); });
else
restore_coordination = std::make_shared<RestoreCoordinationLocal>();
if (on_cluster || restore_settings.async)
cloned_context = Context::createCopy(context);
else
cloned_context = context; /// No need to clone context
if (on_cluster)
{
DDLQueryOnClusterParams params;
params.cluster = cluster;
params.only_shard_num = restore_settings.shard_num;
params.only_replica_num = restore_settings.replica_num;
params.access_to_check = access_to_check;
cloned_context->setSetting("distributed_ddl_task_timeout", -1); // No timeout
cloned_context->setSetting("distributed_ddl_output_mode", Field{"throw"});
auto res = executeDDLQueryOnCluster(restore_query, cloned_context, params);
on_cluster_io = std::make_shared<BlockIO>(std::move(res));
}
}
catch (...)
{
setStatus(restore_uuid, BackupStatus::FAILED_TO_RESTORE);
throw;
}
{
Info info;
info.uuid = uuid;
info.backup_name = backup_info.toString();
info.status = BackupStatus::RESTORING;
info.status_changed_time = time(nullptr);
info.internal = restore_settings.internal;
std::lock_guard lock{infos_mutex};
infos.emplace(uuid, std::move(info));
}
auto job = [this, query, context, uuid]
auto job = [this,
backup_info,
restore_uuid,
restore_query,
restore_settings,
restore_coordination,
on_cluster_io,
cloned_context](bool in_separate_thread)
{
try
{
const ASTBackupQuery & restore_query = typeid_cast<const ASTBackupQuery &>(*query);
executeRestoreImpl(restore_query, uuid, context, restores_thread_pool);
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::RESTORED;
info.status_changed_time = time(nullptr);
if (on_cluster_io)
{
PullingPipelineExecutor executor(on_cluster_io->pipeline);
Block block;
while (executor.pull(block))
;
}
else
{
std::optional<CurrentThread::QueryScope> query_scope;
if (in_separate_thread)
query_scope.emplace(cloned_context);
restore_query->setDatabase(cloned_context->getCurrentDatabase());
BackupFactory::CreateParams backup_open_params;
backup_open_params.open_mode = IBackup::OpenMode::READ;
backup_open_params.context = cloned_context;
backup_open_params.backup_info = backup_info;
backup_open_params.base_backup_info = restore_settings.base_backup_info;
backup_open_params.password = restore_settings.password;
BackupPtr backup = BackupFactory::instance().createBackup(backup_open_params);
auto timeout_for_restoring_metadata
= std::chrono::seconds{cloned_context->getConfigRef().getInt("backups.restore_metadata_timeout", -1)};
auto restore_tasks = makeRestoreTasks(
cloned_context, backup, restore_query->elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
restoreMetadata(restore_tasks, restore_settings, restore_coordination, timeout_for_restoring_metadata);
restoreData(restore_tasks, restores_thread_pool);
}
setStatus(restore_uuid, BackupStatus::RESTORED);
}
catch (...)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = BackupStatus::FAILED_TO_RESTORE;
info.status_changed_time = time(nullptr);
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
setStatus(restore_uuid, BackupStatus::FAILED_TO_RESTORE);
if (!in_separate_thread)
throw;
}
};
if (restore_settings.async)
{
restores_thread_pool.scheduleOrThrowOnError(job);
}
restores_thread_pool.scheduleOrThrowOnError([job] { job(true); });
else
{
job();
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
if (info.status == BackupStatus::FAILED_TO_RESTORE)
std::rethrow_exception(info.exception);
}
job(false);
return uuid;
return restore_uuid;
}
void BackupsWorker::wait(const UUID & backup_or_restore_uuid)
void BackupsWorker::addInfo(const UUID & uuid, const String & backup_name, BackupStatus status, bool internal)
{
Info info;
info.uuid = uuid;
info.backup_name = backup_name;
info.status = status;
info.status_changed_time = time(nullptr);
info.internal = internal;
std::lock_guard lock{infos_mutex};
infos[uuid] = std::move(info);
}
void BackupsWorker::setStatus(const UUID & uuid, BackupStatus status)
{
std::lock_guard lock{infos_mutex};
auto & info = infos.at(uuid);
info.status = status;
info.status_changed_time = time(nullptr);
if ((status == BackupStatus::FAILED_TO_BACKUP) || (status == BackupStatus::FAILED_TO_RESTORE))
{
info.error_message = getCurrentExceptionMessage(false);
info.exception = std::current_exception();
}
switch (status)
{
case BackupStatus::BACKUP_COMPLETE:
LOG_INFO(log, "{} {} was created successfully", (info.internal ? "Internal backup" : "Backup"), info.backup_name);
break;
case BackupStatus::FAILED_TO_BACKUP:
LOG_ERROR(log, "Failed to create {} {}", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
case BackupStatus::RESTORED:
LOG_INFO(log, "Restored from {} {} successfully", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
case BackupStatus::FAILED_TO_RESTORE:
LOG_ERROR(log, "Failed to restore from {} {}", (info.internal ? "internal backup" : "backup"), info.backup_name);
break;
default:
break;
}
}
void BackupsWorker::wait(const UUID & backup_or_restore_uuid, bool rethrow_exception)
{
std::unique_lock lock{infos_mutex};
status_changed.wait(lock, [&]
@ -316,7 +367,10 @@ void BackupsWorker::wait(const UUID & backup_or_restore_uuid)
auto it = infos.find(backup_or_restore_uuid);
if (it == infos.end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "BackupsWorker: Unknown UUID {}", toString(backup_or_restore_uuid));
auto current_status = it->second.status;
const auto & info = it->second;
auto current_status = info.status;
if (rethrow_exception && ((current_status == BackupStatus::FAILED_TO_BACKUP) || (current_status == BackupStatus::FAILED_TO_RESTORE)))
std::rethrow_exception(info.exception);
return (current_status == BackupStatus::BACKUP_COMPLETE) || (current_status == BackupStatus::RESTORED);
});
}
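For illustration, a minimal stand-in for the wait-with-rethrow pattern above, using an optional status plus a stored std::exception_ptr (OperationTracker is a hypothetical name):
#include <condition_variable>
#include <exception>
#include <mutex>
#include <optional>
struct OperationTracker
{
    std::mutex mutex;
    std::condition_variable status_changed;
    std::optional<bool> finished_ok;                  /// Empty while still running.
    std::exception_ptr exception;
    /// Called by the worker when the operation ends, with or without an error.
    void finish(std::exception_ptr e = nullptr)
    {
        std::lock_guard lock{mutex};
        exception = e;
        finished_ok = (e == nullptr);
        status_changed.notify_all();
    }
    /// Blocks until the operation ends; surfaces the failure if requested.
    void wait(bool rethrow_exception = true)
    {
        std::unique_lock lock{mutex};
        status_changed.wait(lock, [&]
        {
            if (!finished_ok)
                return false;                         /// Keep waiting.
            if (rethrow_exception && !*finished_ok)
                std::rethrow_exception(exception);
            return true;
        });
    }
};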
@ -345,10 +399,10 @@ void BackupsWorker::shutdown()
size_t num_active_restores = restores_thread_pool.active();
if (!num_active_backups && !num_active_restores)
return;
LOG_INFO(&Poco::Logger::get("BackupsWorker"), "Waiting for {} backup and {} restore tasks to be finished", num_active_backups, num_active_restores);
LOG_INFO(log, "Waiting for {} backup and {} restore tasks to be finished", num_active_backups, num_active_restores);
backups_thread_pool.wait();
restores_thread_pool.wait();
LOG_INFO(&Poco::Logger::get("BackupsWorker"), "All backup and restore tasks have finished");
LOG_INFO(log, "All backup and restore tasks have finished");
}
}

View File

@ -26,7 +26,7 @@ public:
/// Waits until a BACKUP or RESTORE query started by start() is finished.
/// The function returns immediately if the operation is already finished.
void wait(const UUID & backup_or_restore_uuid);
void wait(const UUID & backup_or_restore_uuid, bool rethrow_exception = true);
/// Information about executing a BACKUP or RESTORE query started by calling start().
struct Info
@ -54,12 +54,16 @@ private:
UUID startMakingBackup(const ASTPtr & query, const ContextPtr & context);
UUID startRestoring(const ASTPtr & query, ContextMutablePtr context);
void addInfo(const UUID & uuid, const String & backup_name, BackupStatus status, bool internal);
void setStatus(const UUID & uuid, BackupStatus status);
ThreadPool backups_thread_pool;
ThreadPool restores_thread_pool;
std::unordered_map<UUID, Info> infos;
std::condition_variable status_changed;
mutable std::mutex infos_mutex;
const Poco::Logger * log;
};
}

View File

@ -303,14 +303,14 @@ void DDLRenamingSettings::setFromBackupQuery(const ASTBackupQuery::Elements & ba
{
const String & table_name = element.name.second;
String database_name = element.name.first;
if (element.name_is_in_temp_db)
if (element.is_temp_db)
database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!table_name.empty());
assert(!database_name.empty());
const String & new_table_name = element.new_name.second;
String new_database_name = element.new_name.first;
if (element.new_name_is_in_temp_db)
if (element.is_temp_db)
new_database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!new_table_name.empty());
assert(!new_database_name.empty());
@ -322,12 +322,12 @@ void DDLRenamingSettings::setFromBackupQuery(const ASTBackupQuery::Elements & ba
case ASTBackupQuery::DATABASE:
{
String database_name = element.name.first;
if (element.name_is_in_temp_db)
if (element.is_temp_db)
database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!database_name.empty());
String new_database_name = element.new_name.first;
if (element.new_name_is_in_temp_db)
if (element.is_temp_db)
new_database_name = DatabaseCatalog::TEMPORARY_DATABASE;
assert(!new_database_name.empty());

View File

@ -8,7 +8,7 @@
namespace DB
{
class IBackupEntry;
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
/// Represents a backup, i.e. a storage of BackupEntries which can be accessed by their names.
/// A backup can be either incremental or non-incremental. An incremental backup doesn't store

View File

@ -6,6 +6,7 @@
namespace DB
{
using DatabaseAndTableName = std::pair<String, String>;
/// Keeps information about files contained in a backup.
class IBackupCoordination
@ -13,6 +14,44 @@ class IBackupCoordination
public:
virtual ~IBackupCoordination() = default;
/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function, and then all the added paths can be returned by calling
/// getReplicatedTableDataPaths().
virtual void addReplicatedTableDataPath(const String & table_zk_path, const String & table_data_path) = 0;
struct PartNameAndChecksum
{
String part_name;
UInt128 checksum;
};
/// Adds part names which a specified replica of a replicated table is going to put to the backup.
/// Multiple replicas of the replicated table call this function, and then the added part names can be returned by calling
/// getReplicatedTablePartNames().
/// Checksums are used only to check that parts with the same names on different replicas have the same content.
virtual void addReplicatedTablePartNames(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
= 0;
/// Sets that a specified host finished preparations for copying the backup's files, successfully or not.
/// `error_message` should be set to a non-empty string if it was not successful.
virtual void finishPreparing(const String & host_id, const String & error_message = {}) = 0;
/// Waits up to the specified timeout for the specified hosts to finish preparing for copying the backup's files.
virtual void
waitForAllHostsPrepared(const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const = 0;
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedTableDataPath()).
virtual Strings getReplicatedTableDataPaths(const String & table_zk_path) const = 0;
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as the one added by addReplicatedTablePartNames(), but with duplicates removed and without
/// parts covered by other parts.
virtual Strings getReplicatedTablePartNames(const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path) const = 0;
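A minimal in-memory sketch of the data-path half of this interface; the distributed implementation keeps the same state in ZooKeeper, and LocalCoordinationSketch is a hypothetical name:
#include <map>
#include <mutex>
#include <string>
#include <vector>
class LocalCoordinationSketch
{
public:
    void addReplicatedTableDataPath(const std::string & table_zk_path, const std::string & table_data_path)
    {
        std::lock_guard lock{mutex};
        data_paths[table_zk_path].push_back(table_data_path);
    }
    std::vector<std::string> getReplicatedTableDataPaths(const std::string & table_zk_path) const
    {
        std::lock_guard lock{mutex};
        auto it = data_paths.find(table_zk_path);
        return (it == data_paths.end()) ? std::vector<std::string>{} : it->second;
    }
private:
    mutable std::mutex mutex;
    std::map<std::string, std::vector<std::string>> data_paths; /// zk path -> paths added by all replicas.
};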
struct FileInfo
{
String file_name;

View File

@ -26,7 +26,7 @@ public:
virtual std::unique_ptr<SeekableReadBuffer> getReadBuffer() const = 0;
};
using BackupEntryPtr = std::unique_ptr<IBackupEntry>;
using BackupEntryPtr = std::shared_ptr<const IBackupEntry>;
using BackupEntries = std::vector<std::pair<String, BackupEntryPtr>>;
}

View File

@ -30,11 +30,18 @@ public:
= 0;
/// Wait for another host to create a table in a replicated database.
virtual void waitForCreatingTableInReplicatedDB(
virtual void waitForTableCreatedInReplicatedDB(
const String & database_name,
const String & database_zk_path,
const String & table_name,
std::chrono::seconds timeout = std::chrono::seconds::zero())
std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */)
= 0;
/// Adds a path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose the `data_path_in_backup` with the lexicographically first `host_id`.
virtual void addReplicatedTableDataPath(
const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & data_path_in_backup)
= 0;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
@ -42,14 +49,8 @@ public:
virtual void finishRestoringMetadata(const String & host_id, const String & error_message = {}) = 0;
/// Waits for a specified list of hosts to finish restoring their metadata.
virtual void waitForAllHostsToRestoreMetadata(const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds::zero()) const = 0;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose the `data_path_in_backup` with the lexicographically first `host_id`.
virtual void setReplicatedTableDataPath(
const String & host_id, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & data_path_in_backup)
= 0;
virtual void waitForAllHostsRestoredMetadata(
const Strings & host_ids, std::chrono::seconds timeout = std::chrono::seconds(-1) /* no timeout */) const = 0;
/// Gets path in backup used by a replicated table.
virtual String getReplicatedTableDataPath(const String & table_zk_path) const = 0;

View File

@ -15,12 +15,189 @@ namespace DB
namespace ErrorCodes
{
extern const int FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE;
extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE;
}
namespace
{
struct TableInReplicatedDatabaseStatus
struct ReplicatedTableDataPath
{
String host_id;
DatabaseAndTableName table_name;
String data_path_in_backup;
String serialize() const
{
WriteBufferFromOwnString out;
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(data_path_in_backup, out);
return out.str();
}
static ReplicatedTableDataPath deserialize(const String & str)
{
ReadBufferFromString in{str};
ReplicatedTableDataPath res;
readBinary(res.host_id, in);
readBinary(res.table_name.first, in);
readBinary(res.table_name.second, in);
readBinary(res.data_path_in_backup, in);
return res;
}
};
}
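The serialize()/deserialize() pair can be mirrored with standard iostreams. The sketch below uses length-prefixed strings as a stand-in for writeBinary/readBinary over ClickHouse's buffers, and a hypothetical PathInfo struct:
#include <cstdint>
#include <istream>
#include <ostream>
#include <sstream>
#include <string>
static void writeString(std::ostream & out, const std::string & s)
{
    std::uint64_t size = s.size();
    out.write(reinterpret_cast<const char *>(&size), sizeof(size));
    out.write(s.data(), static_cast<std::streamsize>(size));
}
static std::string readString(std::istream & in)
{
    std::uint64_t size = 0;
    in.read(reinterpret_cast<char *>(&size), sizeof(size));
    std::string s(size, '\0');
    in.read(s.data(), static_cast<std::streamsize>(size));
    return s;
}
struct PathInfo { std::string host_id, database, table, data_path; };
static std::string serialize(const PathInfo & p)
{
    std::ostringstream out;
    writeString(out, p.host_id);
    writeString(out, p.database);
    writeString(out, p.table);
    writeString(out, p.data_path);
    return out.str();
}
static PathInfo deserialize(const std::string & str)
{
    std::istringstream in{str};
    PathInfo p;
    p.host_id = readString(in);
    p.database = readString(in);
    p.table = readString(in);
    p.data_path = readString(in);
    return p;
}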
class RestoreCoordinationDistributed::ReplicatedDatabasesMetadataSync
{
public:
ReplicatedDatabasesMetadataSync(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_), log(&Poco::Logger::get("RestoreCoordination"))
{
createRootNodes();
}
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTable(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_);
zookeeper->createIfNotExists(path, "");
TableStatus status;
status.host_id = host_id_;
status.table_name = DatabaseAndTableName{database_name_, table_name_};
path += "/" + escapeForFileName(table_name_);
auto code = zookeeper->tryCreate(path, status.serialize(), zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
throw zkutil::KeeperException(code, path);
return (code == Coordination::Error::ZOK);
}
/// Sets that we have either created a table in a replicated database or failed doing that.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForTableCreated()).
void finishCreatingTable(
const String & /* host_id_ */,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
else
LOG_TRACE(log, "Failed to create table {}.{}: {}", database_name_, table_name_, error_message_);
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
auto status = TableStatus::deserialize(zookeeper->get(path));
status.error_message = error_message_;
status.ready = error_message_.empty();
zookeeper->set(path, status.serialize());
}
/// Wait for another host to create a table in a replicated database.
void waitForTableCreated(
const String & /* database_name_ */, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableStatus status;
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set again until we call getWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() >= 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (true)
{
if (use_timeout && (time_left.count() <= 0))
{
status = TableStatus::deserialize(zookeeper->get(path));
break;
}
watch_set = true;
status = TableStatus::deserialize(zookeeper->getWatch(path, nullptr, watch_callback));
if (!status.error_message.empty() || status.ready)
break;
LOG_TRACE(log, "Waiting for host {} to create table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now();
if (!watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered))
break;
time_left -= (std::chrono::steady_clock::now() - start_time);
}
else
watch_triggered_event.wait(dummy_lock, watch_triggered);
}
}
if (watch_set)
{
/// Remove watch by triggering it.
++status.increment;
zookeeper->set(path, status.serialize());
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
if (!status.error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} failed to create table {}.{}: {}", status.host_id, status.table_name.first, status.table_name.second, status.error_message);
if (status.ready)
{
LOG_TRACE(log, "Host {} created table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
return;
}
throw Exception(
ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE,
"Host {} was unable to create table {}.{} in {}",
status.host_id,
status.table_name.first,
table_name_,
to_string(timeout_));
}
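The watch-and-wait loop above boils down to the following generic sketch, where a condition_variable models the ZooKeeper watch firing. Using std::condition_variable::wait_until against a fixed deadline replaces the manual `time_left` bookkeeping:
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
/// Returns true once `is_ready()` holds, false if the timeout expired first.
bool waitUntil(const std::function<bool()> & is_ready,
               std::condition_variable & changed,
               std::mutex & mutex,
               std::chrono::seconds timeout)
{
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    std::unique_lock lock{mutex};
    while (!is_ready())
    {
        if (std::chrono::steady_clock::now() >= deadline)
            return false;                             /// Time budget exhausted.
        changed.wait_until(lock, deadline);           /// Woken by the "watch".
    }
    return true;
}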
private:
void createRootNodes()
{
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
}
struct TableStatus
{
String host_id;
DatabaseAndTableName table_name;
@ -28,54 +205,44 @@ namespace
String error_message;
size_t increment = 0;
void write(WriteBuffer & out) const
String serialize() const
{
WriteBufferFromOwnString out;
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(ready, out);
writeBinary(error_message, out);
writeBinary(increment, out);
return out.str();
}
void read(ReadBuffer & in)
static TableStatus deserialize(const String & str)
{
readBinary(host_id, in);
readBinary(table_name.first, in);
readBinary(table_name.second, in);
readBinary(ready, in);
readBinary(error_message, in);
readBinary(increment, in);
ReadBufferFromString in{str};
TableStatus res;
readBinary(res.host_id, in);
readBinary(res.table_name.first, in);
readBinary(res.table_name.second, in);
readBinary(res.ready, in);
readBinary(res.error_message, in);
readBinary(res.increment, in);
return res;
}
};
struct ReplicatedTableDataPath
{
String host_id;
DatabaseAndTableName table_name;
String data_path_in_backup;
void write(WriteBuffer & out) const
{
writeBinary(host_id, out);
writeBinary(table_name.first, out);
writeBinary(table_name.second, out);
writeBinary(data_path_in_backup, out);
}
void read(ReadBuffer & in)
{
readBinary(host_id, in);
readBinary(table_name.first, in);
readBinary(table_name.second, in);
readBinary(data_path_in_backup, in);
}
};
}
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
};
RestoreCoordinationDistributed::RestoreCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_)
: zookeeper_path(zookeeper_path_), get_zookeeper(get_zookeeper_), log(&Poco::Logger::get("RestoreCoordinationDistributed"))
: zookeeper_path(zookeeper_path_)
, get_zookeeper(get_zookeeper_)
, replicated_databases_metadata_sync(
std::make_unique<ReplicatedDatabasesMetadataSync>(zookeeper_path_ + "/repl_databases_metadata", get_zookeeper_))
, all_metadata_barrier(zookeeper_path_ + "/all_metadata", get_zookeeper_, "RestoreCoordination", "restoring metadata")
{
createRootNodes();
}
@ -87,9 +254,7 @@ void RestoreCoordinationDistributed::createRootNodes()
auto zookeeper = get_zookeeper();
zookeeper->createAncestors(zookeeper_path);
zookeeper->createIfNotExists(zookeeper_path, "");
zookeeper->createIfNotExists(zookeeper_path + "/tables_in_repl_databases", "");
zookeeper->createIfNotExists(zookeeper_path + "/metadata_ready", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_data_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_paths", "");
zookeeper->createIfNotExists(zookeeper_path + "/repl_tables_partitions", "");
}
@ -100,302 +265,54 @@ void RestoreCoordinationDistributed::removeAllNodes()
}
bool RestoreCoordinationDistributed::startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_)
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_);
zookeeper->createIfNotExists(path, "");
TableInReplicatedDatabaseStatus status;
status.host_id = host_id_;
status.table_name = DatabaseAndTableName{database_name_, table_name_};
String status_str;
{
WriteBufferFromOwnString buf;
status.write(buf);
status_str = buf.str();
}
path += "/" + escapeForFileName(table_name_);
auto code = zookeeper->tryCreate(path, status_str, zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
throw zkutil::KeeperException(code, path);
return (code == Coordination::Error::ZOK);
return replicated_databases_metadata_sync->startCreatingTable(host_id, database_name, database_zk_path, table_name);
}
/// Ends creating table in a replicated database, successfully or with an error.
/// In the latter case `error_message` should be set.
void RestoreCoordinationDistributed::finishCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_)
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
else
LOG_TRACE(log, "Failed to create table {}.{}: {}", database_name_, table_name_, error_message_);
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableInReplicatedDatabaseStatus status;
String status_str = zookeeper->get(path);
{
ReadBufferFromString buf{status_str};
status.read(buf);
}
status.error_message = error_message_;
status.ready = error_message_.empty();
{
WriteBufferFromOwnString buf;
status.write(buf);
status_str = buf.str();
}
zookeeper->set(path, status_str);
return replicated_databases_metadata_sync->finishCreatingTable(host_id, database_name, database_zk_path, table_name, error_message);
}
/// Wait for another host to create a table in a replicated database.
void RestoreCoordinationDistributed::waitForCreatingTableInReplicatedDB(
const String & /* database_name_ */, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_)
void RestoreCoordinationDistributed::waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/tables_in_repl_databases/" + escapeForFileName(database_zk_path_) + "/" + escapeForFileName(table_name_);
TableInReplicatedDatabaseStatus status;
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set again until we call getWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() > 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (!use_timeout || (time_left.count() > 0))
{
watch_set = true;
String status_str = zookeeper->getWatch(path, nullptr, watch_callback);
{
ReadBufferFromString buf{status_str};
status.read(buf);
}
if (!status.error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} failed to create table {}.{}: {}", status.host_id, status.table_name.first, status.table_name.second, status.error_message);
if (status.ready)
{
LOG_TRACE(log, "Host {} created table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
return;
}
LOG_TRACE(log, "Waiting for host {} to create table {}.{}", status.host_id, status.table_name.first, status.table_name.second);
std::chrono::steady_clock::time_point start_time;
if (use_timeout)
start_time = std::chrono::steady_clock::now();
bool waited;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
waited = watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered);
}
else
{
watch_triggered_event.wait(dummy_lock, watch_triggered);
waited = true;
}
}
if (use_timeout)
{
time_left -= (std::chrono::steady_clock::now() - start_time);
if (time_left.count() < 0)
time_left = std::chrono::steady_clock::duration::zero();
}
if (!waited)
break;
}
if (watch_set)
{
/// Remove watch by triggering it.
++status.increment;
WriteBufferFromOwnString buf;
status.write(buf);
zookeeper->set(path, buf.str());
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to create table {}.{} in {}",
status.host_id,
status.table_name.first,
table_name_,
to_string(timeout_));
return replicated_databases_metadata_sync->waitForTableCreated(database_name, database_zk_path, table_name, timeout);
}
void RestoreCoordinationDistributed::finishRestoringMetadata(const String & host_id_, const String & error_message_)
void RestoreCoordinationDistributed::finishRestoringMetadata(const String & host_id, const String & error_message)
{
LOG_TRACE(log, "Finished restoring metadata{}", (error_message_.empty() ? "" : (" with error " + error_message_)));
auto zookeeper = get_zookeeper();
if (error_message_.empty())
zookeeper->create(zookeeper_path + "/metadata_ready/" + host_id_ + ":ready", "", zkutil::CreateMode::Persistent);
else
zookeeper->create(zookeeper_path + "/metadata_ready/" + host_id_ + ":error", error_message_, zkutil::CreateMode::Persistent);
all_metadata_barrier.finish(host_id, error_message);
}
void RestoreCoordinationDistributed::waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const
void RestoreCoordinationDistributed::waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const
{
auto zookeeper = get_zookeeper();
bool all_hosts_ready = false;
String not_ready_host_id;
String error_host_id;
String error_message;
/// Inspects the listed nodes and sets `all_hosts_ready`, or records which host is not ready or has failed.
auto process_nodes = [&](const Strings & nodes)
{
std::unordered_set<std::string_view> set{nodes.begin(), nodes.end()};
for (const String & host_id : host_ids_)
{
if (set.contains(host_id + ":error"))
{
error_host_id = host_id;
error_message = zookeeper->get(zookeeper_path + "/metadata_ready/" + host_id + ":error");
return;
}
if (!set.contains(host_id + ":ready"))
{
LOG_TRACE(log, "Waiting for host {} to restore its metadata", host_id);
not_ready_host_id = host_id;
return;
}
}
all_hosts_ready = true;
};
std::atomic<bool> watch_set = false;
std::condition_variable watch_triggered_event;
auto watch_callback = [&](const Coordination::WatchResponse &)
{
watch_set = false; /// After it's triggered it's not set until we call getChildrenWatch() again.
watch_triggered_event.notify_all();
};
auto watch_triggered = [&] { return !watch_set; };
bool use_timeout = (timeout_.count() > 0);
std::chrono::steady_clock::duration time_left = timeout_;
std::mutex dummy_mutex;
while (!use_timeout || (time_left.count() > 0))
{
watch_set = true;
Strings children = zookeeper->getChildrenWatch(zookeeper_path + "/metadata_ready", nullptr, watch_callback);
process_nodes(children);
if (!error_message.empty())
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to restore its metadata: {}",
error_host_id,
error_message);
if (all_hosts_ready)
{
LOG_TRACE(log, "All hosts have finished restoring metadata");
return;
}
std::chrono::steady_clock::time_point start_time;
if (use_timeout)
start_time = std::chrono::steady_clock::now();
bool waited;
{
std::unique_lock dummy_lock{dummy_mutex};
if (use_timeout)
{
waited = watch_triggered_event.wait_for(dummy_lock, time_left, watch_triggered);
}
else
{
watch_triggered_event.wait(dummy_lock, watch_triggered);
waited = true;
}
}
if (use_timeout)
{
time_left -= (std::chrono::steady_clock::now() - start_time);
if (time_left.count() < 0)
time_left = std::chrono::steady_clock::duration::zero();
}
if (!waited)
break;
}
if (watch_set)
{
/// Remove watch by triggering it.
zookeeper->create(zookeeper_path + "/metadata_ready/remove_watch-", "", zkutil::CreateMode::EphemeralSequential);
std::unique_lock dummy_lock{dummy_mutex};
watch_triggered_event.wait_for(dummy_lock, timeout_, watch_triggered);
}
throw Exception(
ErrorCodes::FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE,
"Host {} was unable to restore its metadata in {}",
not_ready_host_id,
to_string(timeout_));
all_metadata_barrier.waitForAllHostsToFinish(host_ids, timeout);
}
void RestoreCoordinationDistributed::setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_)
void RestoreCoordinationDistributed::addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup)
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_data_paths/" + escapeForFileName(table_zk_path_);
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path);
String new_info_str;
{
ReplicatedTableDataPath new_info;
new_info.host_id = host_id_;
new_info.table_name = table_name_;
new_info.data_path_in_backup = data_path_in_backup_;
WriteBufferFromOwnString buf;
new_info.write(buf);
new_info_str = buf.str();
}
ReplicatedTableDataPath new_info;
new_info.host_id = host_id;
new_info.table_name = table_name;
new_info.data_path_in_backup = data_path_in_backup;
String new_info_str = new_info.serialize();
auto code = zookeeper->tryCreate(path, new_info_str, zkutil::CreateMode::Persistent);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZNODEEXISTS))
@ -404,11 +321,8 @@ void RestoreCoordinationDistributed::setReplicatedTableDataPath(
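/// Optimistic concurrency loop: read the current node together with its version, keep it
/// if its (host_id, table_name) already sorts first, otherwise trySet() with that version
/// and retry if another host won the race (ZBADVERSION).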
while (code != Coordination::Error::ZOK)
{
Coordination::Stat stat;
String cur_info_str = zookeeper->get(path, &stat);
ReadBufferFromString buf{cur_info_str};
ReplicatedTableDataPath cur_info;
cur_info.read(buf);
if ((cur_info.host_id < host_id_) || ((cur_info.host_id == host_id_) && (cur_info.table_name <= table_name_)))
ReplicatedTableDataPath cur_info = ReplicatedTableDataPath::deserialize(zookeeper->get(path, &stat));
if ((cur_info.host_id < host_id) || ((cur_info.host_id == host_id) && (cur_info.table_name <= table_name)))
break;
code = zookeeper->trySet(path, new_info_str, stat.version);
if ((code != Coordination::Error::ZOK) && (code != Coordination::Error::ZBADVERSION))
@ -419,11 +333,8 @@ void RestoreCoordinationDistributed::setReplicatedTableDataPath(
String RestoreCoordinationDistributed::getReplicatedTableDataPath(const String & table_zk_path_) const
{
auto zookeeper = get_zookeeper();
String path = zookeeper_path + "/repl_tables_data_paths/" + escapeForFileName(table_zk_path_);
String info_str = zookeeper->get(path);
ReadBufferFromString buf{info_str};
ReplicatedTableDataPath info;
info.read(buf);
String path = zookeeper_path + "/repl_tables_paths/" + escapeForFileName(table_zk_path_);
auto info = ReplicatedTableDataPath::deserialize(zookeeper->get(path));
return info.data_path_in_backup;
}


@ -1,6 +1,7 @@
#pragma once
#include <Backups/IRestoreCoordination.h>
#include <Backups/BackupCoordinationHelpers.h>
#include <Common/ZooKeeper/Common.h>
@ -11,42 +12,42 @@ namespace DB
class RestoreCoordinationDistributed : public IRestoreCoordination
{
public:
RestoreCoordinationDistributed(const String & zookeeper_path_, zkutil::GetZooKeeper get_zookeeper_);
RestoreCoordinationDistributed(const String & zookeeper_path, zkutil::GetZooKeeper get_zookeeper);
~RestoreCoordinationDistributed() override;
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_) override;
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name) override;
/// Sets that either we have created a table in a replicated database or failed to do that.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForCreatingTableInReplicatedDB()).
void finishCreatingTableInReplicatedDB(
const String & host_id_,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_) override;
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message) override;
/// Wait for another host to create a table in a replicated database.
void waitForCreatingTableInReplicatedDB(
const String & database_name_, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id_, const String & error_message_) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws an exception if the time runs out.
void waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const override;
void waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout) override;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
void setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_) override;
void addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id, const String & error_message) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables). Throws an exception if the time runs out.
void waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const override;
/// Gets path in backup used by a replicated table.
String getReplicatedTableDataPath(const String & table_zk_path) const override;
@ -54,10 +55,10 @@ public:
/// Sets that this replica is going to restore a partition in a replicated table.
/// The function returns false if this partition is already being restored by another replica.
bool startInsertingDataToPartitionInReplicatedTable(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & partition_name_) override;
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & partition_name) override;
/// Removes remotely stored information.
void drop() override;
@ -66,9 +67,12 @@ private:
void createRootNodes();
void removeAllNodes();
class ReplicatedDatabasesMetadataSync;
const String zookeeper_path;
const zkutil::GetZooKeeper get_zookeeper;
const Poco::Logger * log;
std::unique_ptr<ReplicatedDatabasesMetadataSync> replicated_databases_metadata_sync;
BackupCoordinationDistributedBarrier all_metadata_barrier;
};
}


@ -3,7 +3,6 @@
#include <Common/Exception.h>
#include <Common/logger_useful.h>
#include <base/chrono_io.h>
#include <boost/range/adaptor/map.hpp>
namespace DB
@ -16,73 +15,73 @@ namespace ErrorCodes
RestoreCoordinationLocal::RestoreCoordinationLocal()
: log(&Poco::Logger::get("RestoreCoordinationLocal"))
: log(&Poco::Logger::get("RestoreCoordination"))
{}
RestoreCoordinationLocal::~RestoreCoordinationLocal() = default;
bool RestoreCoordinationLocal::startCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & /* database_name_ */,
const String & /* database_zk_path_*/,
const String & /* table_name_ */)
const String & /* host_id */,
const String & /* database_name */,
const String & /* database_zk_path */,
const String & /* table_name */)
{
return true;
}
void RestoreCoordinationLocal::finishCreatingTableInReplicatedDB(
const String & /* host_id_ */,
const String & database_name_,
const String & /* database_zk_path_ */,
const String & table_name_,
const String & error_message_)
const String & /* host_id */,
const String & database_name,
const String & /* database_zk_path */,
const String & table_name,
const String & error_message)
{
if (error_message_.empty())
LOG_TRACE(log, "Created table {}.{}", database_name_, table_name_);
if (error_message.empty())
LOG_TRACE(log, "Created table {}.{}", database_name, table_name);
else
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name_, table_name_, error_message_);
LOG_TRACE(log, "Failed to created table {}.{}: {}", database_name, table_name, error_message);
}
/// Wait for another host to create a table in a replicated database.
void RestoreCoordinationLocal::waitForCreatingTableInReplicatedDB(
const String & /* database_name_ */,
const String & /* database_zk_path_ */,
const String & /* table_name_ */,
std::chrono::seconds /* timeout_ */)
void RestoreCoordinationLocal::waitForTableCreatedInReplicatedDB(
const String & /* database_name */,
const String & /* database_zk_path */,
const String & /* table_name */,
std::chrono::seconds /* timeout */)
{
}
void RestoreCoordinationLocal::finishRestoringMetadata(const String & /* host_id */, const String & error_message_)
void RestoreCoordinationLocal::finishRestoringMetadata(const String & /* host_id */, const String & error_message)
{
LOG_TRACE(log, "Finished restoring metadata{}", (error_message_.empty() ? "" : (" with error " + error_message_)));
LOG_TRACE(log, "Finished restoring metadata{}", (error_message.empty() ? "" : (" with error " + error_message)));
}
void RestoreCoordinationLocal::waitForAllHostsToRestoreMetadata(const Strings & /* host_ids_ */, std::chrono::seconds /* timeout_ */) const
void RestoreCoordinationLocal::waitForAllHostsRestoredMetadata(const Strings & /* host_ids */, std::chrono::seconds /* timeout */) const
{
}
void RestoreCoordinationLocal::setReplicatedTableDataPath(const String & /* host_id_ */,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_)
void RestoreCoordinationLocal::addReplicatedTableDataPath(const String & /* host_id */,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup)
{
std::lock_guard lock{mutex};
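/// Mirrors the distributed coordination: if this is called several times for the same
/// zookeeper path, the entry with the lexicographically smallest table_name wins.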
auto it = replicated_tables_data_paths.find(table_zk_path_);
auto it = replicated_tables_data_paths.find(table_zk_path);
if (it == replicated_tables_data_paths.end())
{
ReplicatedTableDataPath new_info;
new_info.table_name = table_name_;
new_info.data_path_in_backup = data_path_in_backup_;
replicated_tables_data_paths.emplace(table_zk_path_, std::move(new_info));
new_info.table_name = table_name;
new_info.data_path_in_backup = data_path_in_backup;
replicated_tables_data_paths.emplace(table_zk_path, std::move(new_info));
return;
}
else
{
auto & cur_info = it->second;
if (table_name_ < cur_info.table_name)
if (table_name < cur_info.table_name)
{
cur_info.table_name = table_name_;
cur_info.data_path_in_backup = data_path_in_backup_;
cur_info.table_name = table_name;
cur_info.data_path_in_backup = data_path_in_backup;
}
}
}
@ -97,12 +96,12 @@ String RestoreCoordinationLocal::getReplicatedTableDataPath(const String & table
}
bool RestoreCoordinationLocal::startInsertingDataToPartitionInReplicatedTable(
const String & /* host_id_ */, const DatabaseAndTableName & table_name_, const String & table_zk_path_, const String & partition_name_)
const String & /* host_id */, const DatabaseAndTableName & table_name, const String & table_zk_path, const String & partition_name)
{
std::lock_guard lock{mutex};
auto key = std::pair{table_zk_path_, partition_name_};
auto it = replicated_tables_partitions.try_emplace(std::move(key), table_name_).first;
return it->second == table_name_;
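/// try_emplace() inserts only when the key is absent, so the first replica to claim
/// (table_zk_path, partition_name) wins; later callers see the winner's table_name
/// and get false.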
auto key = std::pair{table_zk_path, partition_name};
auto it = replicated_tables_partitions.try_emplace(std::move(key), table_name).first;
return it->second == table_name;
}
}


@ -20,48 +20,48 @@ public:
/// Starts creating a table in a replicated database. Returns false if there is another host which is already creating this table.
bool startCreatingTableInReplicatedDB(
const String & host_id_, const String & database_name_, const String & database_zk_path_, const String & table_name_) override;
const String & host_id, const String & database_name, const String & database_zk_path, const String & table_name) override;
/// Sets that either we have created a table in a replicated database or failed to do that.
/// In the latter case `error_message` should be set.
/// Calling this function unblocks other hosts waiting for this table to be created (see waitForCreatingTableInReplicatedDB()).
void finishCreatingTableInReplicatedDB(
const String & host_id_,
const String & database_name_,
const String & database_zk_path_,
const String & table_name_,
const String & error_message_) override;
const String & host_id,
const String & database_name,
const String & database_zk_path,
const String & table_name,
const String & error_message) override;
/// Wait for another host to create a table in a replicated database.
void waitForCreatingTableInReplicatedDB(
const String & database_name_, const String & database_zk_path_, const String & table_name_, std::chrono::seconds timeout_) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id_, const String & error_message_) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables).
void waitForAllHostsToRestoreMetadata(const Strings & host_ids_, std::chrono::seconds timeout_) const override;
void waitForTableCreatedInReplicatedDB(
const String & database_name, const String & database_zk_path, const String & table_name, std::chrono::seconds timeout) override;
/// Sets path in backup used by a replicated table.
/// This function can be called multiple times for the same table with different `host_id`, and in that case
/// getReplicatedTableDataPath() will choose `data_path_in_backup` with the lexicographically first `host_id`.
void setReplicatedTableDataPath(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & data_path_in_backup_) override;
void addReplicatedTableDataPath(
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & data_path_in_backup) override;
/// Sets that a specified host has finished restoring metadata, successfully or with an error.
/// In the latter case `error_message` should be set.
void finishRestoringMetadata(const String & host_id, const String & error_message) override;
/// Waits for all hosts to finish restoring their metadata (i.e. to finish creating databases and tables).
void waitForAllHostsRestoredMetadata(const Strings & host_ids, std::chrono::seconds timeout) const override;
/// Gets path in backup used by a replicated table.
String getReplicatedTableDataPath(const String & table_zk_path_) const override;
String getReplicatedTableDataPath(const String & table_zk_path) const override;
/// Sets that this replica is going to restore a partition in a replicated table.
/// The function returns false if this partition is already being restored by another replica.
bool startInsertingDataToPartitionInReplicatedTable(
const String & host_id_,
const DatabaseAndTableName & table_name_,
const String & table_zk_path_,
const String & partition_name_) override;
const String & host_id,
const DatabaseAndTableName & table_name,
const String & table_zk_path,
const String & partition_name) override;
private:
struct ReplicatedTableDataPath


@ -34,8 +34,7 @@ using RestoreDatabaseCreationMode = RestoreTableCreationMode;
struct RestoreSettings : public StorageRestoreSettings
{
/// Base backup, with this setting we can override the location of the base backup while restoring.
/// Any incremental backup keeps inside the information about its base backup,
/// so using this setting is optional.
/// Any incremental backup keeps inside the information about its base backup, so using this setting is optional.
std::optional<BackupInfo> base_backup_info;
/// Password used to decrypt the backup.


@ -24,9 +24,7 @@
#include <Storages/StorageReplicatedMergeTree.h>
#include <base/chrono_io.h>
#include <base/insertAtEnd.h>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <filesystem>
namespace DB
@ -43,7 +41,7 @@ namespace
class PathsInBackup
{
public:
explicit PathsInBackup(const IBackup & backup_) : backup(backup_) {}
explicit PathsInBackup(const IBackup & backup_) : backup(backup_) { }
std::vector<size_t> getShards() const
{
@ -96,7 +94,9 @@ namespace
std::vector<String> res;
String escaped_database_name = escapeForFileName(database_name);
insertAtEnd(res, backup.listFiles(fmt::format("shards/{}/replicas/{}/metadata/{}/", shard_index, replica_index, escaped_database_name)));
insertAtEnd(
res,
backup.listFiles(fmt::format("shards/{}/replicas/{}/metadata/{}/", shard_index, replica_index, escaped_database_name)));
insertAtEnd(res, backup.listFiles(fmt::format("shards/{}/metadata/{}/", shard_index, escaped_database_name)));
insertAtEnd(res, backup.listFiles(fmt::format("metadata/{}/", escaped_database_name)));
@ -172,10 +172,7 @@ namespace
class RestoreDatabaseTask : public IRestoreTask
{
public:
RestoreDatabaseTask(
ContextMutablePtr context_,
const ASTPtr & create_query_,
const RestoreSettingsPtr & restore_settings_)
RestoreDatabaseTask(ContextMutablePtr context_, const ASTPtr & create_query_, const RestoreSettingsPtr & restore_settings_)
: context(context_)
, create_query(typeid_cast<std::shared_ptr<ASTCreateQuery>>(create_query_))
, restore_settings(restore_settings_)
@ -201,6 +198,7 @@ namespace
auto cloned_create_query = typeid_cast<std::shared_ptr<ASTCreateQuery>>(create_query->clone());
cloned_create_query->if_not_exists = (restore_settings->create_database == RestoreDatabaseCreationMode::kCreateIfNotExists);
InterpreterCreateQuery create_interpreter{cloned_create_query, context};
create_interpreter.setInternal(true);
create_interpreter.execute();
}
@ -244,52 +242,6 @@ namespace
};
class RestoreTableDataTask : public IRestoreTask
{
public:
RestoreTableDataTask(
ContextMutablePtr context_,
StoragePtr storage_,
const ASTs & partitions_,
const BackupPtr & backup_,
const String & data_path_in_backup_,
const RestoreSettingsPtr & restore_settings_,
const std::shared_ptr<IRestoreCoordination> & restore_coordination_)
: context(context_)
, storage(storage_)
, partitions(partitions_)
, backup(backup_)
, data_path_in_backup(data_path_in_backup_)
, restore_settings(restore_settings_)
, restore_coordination(restore_coordination_)
{
}
RestoreTasks run() override
{
const auto * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage.get());
if (replicated_table)
{
data_path_in_backup = restore_coordination->getReplicatedTableDataPath(
replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath());
}
RestoreTasks tasks;
tasks.emplace_back(storage->restoreData(context, partitions, backup, data_path_in_backup, *restore_settings, restore_coordination));
return tasks;
}
private:
ContextMutablePtr context;
StoragePtr storage;
ASTs partitions;
BackupPtr backup;
String data_path_in_backup;
RestoreSettingsPtr restore_settings;
std::shared_ptr<IRestoreCoordination> restore_coordination;
};
/// Restores a table.
class RestoreTableTask : public IRestoreTask
{
@ -393,7 +345,8 @@ namespace
if (!replicated_database)
return;
restore_coordination->waitForCreatingTableInReplicatedDB(table_name.first, replicated_database->getZooKeeperPath(), table_name.second);
restore_coordination->waitForTableCreatedInReplicatedDB(
table_name.first, replicated_database->getZooKeeperPath(), table_name.second);
/// The table `table_name` was created on another host, so it must be in the replicated database's queue;
/// we have to wait until the replicated database syncs that.
@ -402,7 +355,8 @@ namespace
bool use_timeout = (timeout_for_restoring_metadata.count() > 0);
while (!database->isTableExist(table_name.second, context))
{
if (replicated_database_synced || (use_timeout && (std::chrono::steady_clock::now() - start_time) >= timeout_for_restoring_metadata))
if (replicated_database_synced
|| (use_timeout && (std::chrono::steady_clock::now() - start_time) >= timeout_for_restoring_metadata))
{
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE,
@ -423,7 +377,8 @@ namespace
if (!restore_settings->structure_only)
{
data_path_in_backup = PathsInBackup{*backup}.getDataPath(table_name_in_backup, restore_settings->shard_num_in_backup, restore_settings->replica_num_in_backup);
data_path_in_backup = PathsInBackup{*backup}.getDataPath(
table_name_in_backup, restore_settings->shard_num_in_backup, restore_settings->replica_num_in_backup);
has_data = !backup->listFiles(data_path_in_backup).empty();
const auto * replicated_table = typeid_cast<const StorageReplicatedMergeTree *>(storage.get());
@ -435,7 +390,7 @@ namespace
/// That's why we use the restore coordination here: during the metadata-restoring stage each replica sets its own
/// `data_path_in_backup` for the same zookeeper path, and then the restore coordination chooses one `data_path_in_backup`
/// to use for restoring data.
restore_coordination->setReplicatedTableDataPath(
restore_coordination->addReplicatedTableDataPath(
restore_settings->host_id,
table_name_in_backup,
replicated_table->getZooKeeperName() + replicated_table->getZooKeeperPath(),
@ -528,7 +483,8 @@ namespace
return {};
RestoreTasks tasks;
tasks.emplace_back(std::make_unique<RestoreTableDataTask>(context, storage, partitions, backup, data_path_in_backup, restore_settings, restore_coordination));
tasks.emplace_back(
storage->restoreData(context, partitions, backup, data_path_in_backup, *restore_settings, restore_coordination));
return tasks;
}
@ -579,21 +535,18 @@ namespace
{
switch (element.type)
{
case ElementType::TABLE:
{
case ElementType::TABLE: {
prepareToRestoreTable(element.name, element.partitions);
break;
}
case ElementType::DATABASE:
{
case ElementType::DATABASE: {
const String & database_name = element.name.first;
prepareToRestoreDatabase(database_name, element.except_list);
break;
}
case ElementType::ALL_DATABASES:
{
case ElementType::ALL_DATABASES: {
prepareToRestoreAllDatabases(element.except_list);
break;
}
@ -612,7 +565,15 @@ namespace
/// TODO: We need to restore tables according to their dependencies.
for (const auto & info : tables | boost::adaptors::map_values)
res.push_back(std::make_unique<RestoreTableTask>(context, info.create_query, info.partitions, backup, info.name_in_backup, restore_settings_ptr, restore_coordination, timeout_for_restoring_metadata));
res.push_back(std::make_unique<RestoreTableTask>(
context,
info.create_query,
info.partitions,
backup,
info.name_in_backup,
restore_settings_ptr,
restore_coordination,
timeout_for_restoring_metadata));
return res;
}
@ -645,12 +606,11 @@ namespace
{
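/// Pick the replica to restore from: the only replica if the backup stores a single one,
/// otherwise this host's own replica number when the backup contains it, otherwise the
/// first replica stored in the backup.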
if (replicas_in_backup.size() == 1)
restore_settings.replica_num_in_backup = replicas_in_backup[0];
else
else if (std::find(replicas_in_backup.begin(), replicas_in_backup.end(), replica_num) != replicas_in_backup.end())
restore_settings.replica_num_in_backup = replica_num;
else
restore_settings.replica_num_in_backup = replicas_in_backup[0];
}
if (std::find(replicas_in_backup.begin(), replicas_in_backup.end(), restore_settings.replica_num_in_backup) == replicas_in_backup.end())
throw Exception(ErrorCodes::BACKUP_ENTRY_NOT_FOUND, "No replica #{} in backup", restore_settings.replica_num_in_backup);
}
/// Prepares to restore a single table and probably its database's definition.
@ -659,7 +619,8 @@ namespace
/// Check that we are not trying to restore the same table again.
DatabaseAndTableName new_table_name = renaming_settings.getNewTableName(table_name_);
if (tables.contains(new_table_name))
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} twice", formatTableNameOrTemporaryTableName(new_table_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} twice", formatTableNameOrTemporaryTableName(new_table_name));
/// Make a create query for this table.
auto create_query = renameInCreateQuery(readCreateQueryFromBackup(table_name_));
@ -677,14 +638,19 @@ namespace
/// Check that we are not trying to restore the same database again.
String new_database_name = renaming_settings.getNewDatabaseName(database_name_);
if (databases.contains(new_database_name))
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} twice", backQuoteIfNeed(new_database_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} twice", backQuoteIfNeed(new_database_name));
Strings table_names = PathsInBackup{*backup}.getTables(database_name_, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
Strings table_names = PathsInBackup{*backup}.getTables(
database_name_, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
bool has_tables_in_backup = !table_names.empty();
bool has_create_query_in_backup = hasCreateQueryInBackup(database_name_);
if (!has_create_query_in_backup && !has_tables_in_backup)
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name_));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE,
"Cannot restore the database {} because there is no such database in the backup",
backQuoteIfNeed(database_name_));
/// Of course we're not going to restore the definition of the system or the temporary database.
if (!isSystemOrTemporaryDatabase(new_database_name))
@ -718,7 +684,8 @@ namespace
/// Prepares to restore all the databases contained in the backup.
void prepareToRestoreAllDatabases(const std::set<String> & except_list_)
{
for (const String & database_name : PathsInBackup{*backup}.getDatabases(restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup))
for (const String & database_name :
PathsInBackup{*backup}.getDatabases(restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup))
{
if (except_list_.contains(database_name))
continue;
@ -729,36 +696,46 @@ namespace
/// Reads a create query for creating a specified table from the backup.
std::shared_ptr<ASTCreateQuery> readCreateQueryFromBackup(const DatabaseAndTableName & table_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(table_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
table_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
if (!backup->fileExists(create_query_path))
throw Exception(ErrorCodes::CANNOT_RESTORE_TABLE, "Cannot restore the {} because there is no such table in the backup",
formatTableNameOrTemporaryTableName(table_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_TABLE,
"Cannot restore the {} because there is no such table in the backup",
formatTableNameOrTemporaryTableName(table_name));
auto read_buffer = backup->readFile(create_query_path)->getReadBuffer();
String create_query_str;
readStringUntilEOF(create_query_str, *read_buffer);
read_buffer.reset();
ParserCreateQuery create_parser;
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(
parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
}
/// Reads a create query for creating a specified database from the backup.
std::shared_ptr<ASTCreateQuery> readCreateQueryFromBackup(const String & database_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
if (!backup->fileExists(create_query_path))
throw Exception(ErrorCodes::CANNOT_RESTORE_DATABASE, "Cannot restore the database {} because there is no such database in the backup", backQuoteIfNeed(database_name));
throw Exception(
ErrorCodes::CANNOT_RESTORE_DATABASE,
"Cannot restore the database {} because there is no such database in the backup",
backQuoteIfNeed(database_name));
auto read_buffer = backup->readFile(create_query_path)->getReadBuffer();
String create_query_str;
readStringUntilEOF(create_query_str, *read_buffer);
read_buffer.reset();
ParserCreateQuery create_parser;
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
return typeid_cast<std::shared_ptr<ASTCreateQuery>>(
parseQuery(create_parser, create_query_str, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
}
/// Whether there is a create query for creating a specified database in the backup.
bool hasCreateQueryInBackup(const String & database_name) const
{
String create_query_path = PathsInBackup{*backup}.getMetadataPath(database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
String create_query_path = PathsInBackup{*backup}.getMetadataPath(
database_name, restore_settings.shard_num_in_backup, restore_settings.replica_num_in_backup);
return backup->fileExists(create_query_path);
}
@ -799,17 +776,71 @@ namespace
std::map<String /* new_db_name */, CreateDatabaseInfo> databases;
std::map<DatabaseAndTableName /* new_table_name */, CreateTableInfo> tables;
};
}
RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const Elements & elements, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata)
{
try
RestoreTasks makeRestoreTasksImpl(
ContextMutablePtr context,
const BackupPtr & backup,
const Elements & elements,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
RestoreTasksBuilder builder{context, backup, restore_settings, restore_coordination, timeout_for_restoring_metadata};
builder.prepare(elements);
return builder.makeTasks();
}
void restoreMetadataImpl(RestoreTasks & restore_tasks)
{
/// There are two kinds of restore tasks: sequential and non-sequential ones.
/// Sequential tasks are executed first and always in one thread.
std::deque<std::unique_ptr<IRestoreTask>> restore_metadata_tasks;
boost::range::remove_erase_if(
restore_tasks,
[&restore_metadata_tasks](RestoreTaskPtr & task)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
{
restore_metadata_tasks.push_back(std::move(task));
return true;
}
return false;
});
/// Sequential tasks.
while (!restore_metadata_tasks.empty())
{
auto current_task = std::move(restore_metadata_tasks.front());
restore_metadata_tasks.pop_front();
RestoreTasks new_tasks = current_task->run();
for (auto & task : new_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
restore_metadata_tasks.push_back(std::move(task));
else
restore_tasks.push_back(std::move(task));
}
}
}
}
RestoreTasks makeRestoreTasks(
ContextMutablePtr context,
const BackupPtr & backup,
const Elements & elements,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
try
{
return makeRestoreTasksImpl(context, backup, elements, restore_settings, restore_coordination, timeout_for_restoring_metadata);
}
catch (...)
{
restore_coordination->finishRestoringMetadata(restore_settings.host_id, getCurrentExceptionMessage(false));
@ -818,40 +849,15 @@ RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backu
}
void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
void restoreMetadata(
RestoreTasks & restore_tasks,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata)
{
std::deque<std::unique_ptr<IRestoreTask>> sequential_tasks;
std::deque<std::unique_ptr<IRestoreTask>> enqueued_tasks;
try
{
/// There are two kinds of restore tasks: sequential and non-sequential ones.
/// Sequential tasks are executed first and always in one thread.
for (auto & task : restore_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
sequential_tasks.push_back(std::move(task));
else
enqueued_tasks.push_back(std::move(task));
}
/// Sequential tasks.
while (!sequential_tasks.empty())
{
auto current_task = std::move(sequential_tasks.front());
sequential_tasks.pop_front();
RestoreTasks new_tasks = current_task->run();
for (auto & task : new_tasks)
{
if (task->getRestoreKind() == IRestoreTask::RestoreKind::METADATA)
sequential_tasks.push_back(std::move(task));
else
enqueued_tasks.push_back(std::move(task));
}
}
restoreMetadataImpl(restore_tasks);
}
catch (...)
{
@ -863,13 +869,18 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
/// We need this waiting because we're going to call some functions which require data collected from other nodes too,
/// see IRestoreCoordination::checkTablesNotExistedInReplicatedDBs(), IRestoreCoordination::getReplicatedTableDataPath().
restore_coordination->finishRestoringMetadata(restore_settings.host_id);
if (!restore_settings.host_id.empty())
{
restore_coordination->waitForAllHostsToRestoreMetadata(
BackupSettings::Util::filterHostIDs(
restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num),
timeout_for_restoring_metadata);
}
restore_coordination->waitForAllHostsRestoredMetadata(
BackupSettings::Util::filterHostIDs(
restore_settings.cluster_host_ids, restore_settings.shard_num, restore_settings.replica_num),
timeout_for_restoring_metadata);
}
void restoreData(RestoreTasks & restore_tasks, ThreadPool & thread_pool)
{
std::deque<std::unique_ptr<IRestoreTask>> tasks(std::make_move_iterator(restore_tasks.begin()), std::make_move_iterator(restore_tasks.end()));
restore_tasks.clear();
/// Non-sequential tasks.
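/// A task may spawn new tasks; those are appended to the deque under `mutex`, and `event`
/// wakes the scheduling loop whenever a job finishes or new work arrives.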
size_t num_active_jobs = 0;
@ -882,15 +893,15 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
std::unique_ptr<IRestoreTask> current_task;
{
std::unique_lock lock{mutex};
event.wait(lock, [&] { return !enqueued_tasks.empty() || exception || !num_active_jobs; });
if ((enqueued_tasks.empty() && !num_active_jobs) || exception)
event.wait(lock, [&] { return !tasks.empty() || exception || !num_active_jobs; });
if ((tasks.empty() && !num_active_jobs) || exception)
break;
current_task = std::move(enqueued_tasks.front());
enqueued_tasks.pop_front();
current_task = std::move(tasks.front());
tasks.pop_front();
++num_active_jobs;
}
auto job = [current_task = std::shared_ptr<IRestoreTask>(std::move(current_task)), &enqueued_tasks, &num_active_jobs, &exception, &mutex, &event]() mutable
auto job = [current_task = std::shared_ptr<IRestoreTask>(std::move(current_task)), &tasks, &num_active_jobs, &exception, &mutex, &event]() mutable
{
SCOPE_EXIT({
--num_active_jobs;
@ -917,8 +928,7 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
{
std::lock_guard lock{mutex};
enqueued_tasks.insert(
enqueued_tasks.end(), std::make_move_iterator(new_tasks.begin()), std::make_move_iterator(new_tasks.end()));
tasks.insert(tasks.end(), std::make_move_iterator(new_tasks.begin()), std::make_move_iterator(new_tasks.end()));
}
};
@ -935,4 +945,64 @@ void executeRestoreTasks(RestoreTasks && restore_tasks, ThreadPool & thread_pool
std::rethrow_exception(exception);
}
/// Returns access required to execute RESTORE query.
AccessRightsElements getRequiredAccessToRestore(const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings)
{
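/// The required access mirrors what the restore will actually do: SHOW_* to read the
/// definitions, CREATE_* only when the settings allow creating missing objects, and
/// INSERT only when data (not just structure) is restored.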
AccessRightsElements required_access;
for (const auto & element : elements)
{
switch (element.type)
{
case ASTBackupQuery::TABLE:
{
if (element.is_temp_db)
{
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE);
break;
}
AccessFlags flags = AccessType::SHOW_TABLES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags, element.new_name.first, element.new_name.second);
break;
}
case ASTBackupQuery::DATABASE:
{
if (element.is_temp_db)
{
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
required_access.emplace_back(AccessType::CREATE_TEMPORARY_TABLE);
break;
}
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (restore_settings.create_database != RestoreDatabaseCreationMode::kMustExist)
flags |= AccessType::CREATE_DATABASE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags, element.new_name.first);
break;
}
case ASTBackupQuery::ALL_DATABASES:
{
AccessFlags flags = AccessType::SHOW_TABLES | AccessType::SHOW_DATABASES;
if (restore_settings.create_table != RestoreTableCreationMode::kMustExist)
flags |= AccessType::CREATE_TABLE;
if (restore_settings.create_database != RestoreDatabaseCreationMode::kMustExist)
flags |= AccessType::CREATE_DATABASE;
if (!restore_settings.structure_only)
flags |= AccessType::INSERT;
required_access.emplace_back(flags);
break;
}
}
}
return required_access;
}
}


@ -14,6 +14,7 @@ using RestoreTaskPtr = std::unique_ptr<IRestoreTask>;
using RestoreTasks = std::vector<RestoreTaskPtr>;
struct RestoreSettings;
class IRestoreCoordination;
class AccessRightsElements;
class Context;
using ContextPtr = std::shared_ptr<const Context>;
using ContextMutablePtr = std::shared_ptr<Context>;
@ -22,6 +23,16 @@ using ContextMutablePtr = std::shared_ptr<Context>;
RestoreTasks makeRestoreTasks(ContextMutablePtr context, const BackupPtr & backup, const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata);
/// Executes restore tasks.
void executeRestoreTasks(RestoreTasks && tasks, ThreadPool & thread_pool, const RestoreSettings & restore_settings, const std::shared_ptr<IRestoreCoordination> & restore_coordination, std::chrono::seconds timeout_for_restoring_metadata);
void restoreMetadata(
RestoreTasks & restore_tasks,
const RestoreSettings & restore_settings,
const std::shared_ptr<IRestoreCoordination> & restore_coordination,
std::chrono::seconds timeout_for_restoring_metadata);
void restoreData(RestoreTasks & restore_tasks, ThreadPool & thread_pool);
/// Returns access required to execute RESTORE query.
AccessRightsElements getRequiredAccessToRestore(const ASTBackupQuery::Elements & elements, const RestoreSettings & restore_settings);
}


@ -180,7 +180,7 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory)
writer = std::make_shared<BackupWriterFile>(path);
else
writer = std::make_shared<BackupWriterDisk>(disk, path);
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, writer, params.context, params.backup_uuid, params.is_internal_backup, params.coordination_zk_path);
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, writer, params.context, params.backup_uuid, params.is_internal_backup, params.backup_coordination);
}
};


@ -92,6 +92,7 @@
M(FilesystemCacheReadBuffers, "Number of active cache buffers") \
M(CacheFileSegments, "Number of existing cache file segments") \
M(CacheDetachedFileSegments, "Number of existing detached cache file segments") \
M(S3Requests, "S3 requests") \
namespace CurrentMetrics
{


@ -19,7 +19,7 @@
/** This file was edited for ClickHouse.
*/
#include <string.h>
#include <cstring>
#include <Common/Elf.h>
#include <Common/Dwarf.h>


@ -4,7 +4,7 @@
#include <Common/Exception.h>
#include <base/unaligned.h>
#include <string.h>
#include <cstring>
namespace DB


@ -623,7 +623,7 @@
M(652, ONLY_NULLS_WHILE_READING_SCHEMA) \
M(653, CANNOT_PARSE_BACKUP_SETTINGS) \
M(654, WRONG_BACKUP_SETTINGS) \
M(655, FAILED_TO_RESTORE_METADATA_ON_OTHER_NODE) \
M(655, FAILED_TO_SYNC_BACKUP_OR_RESTORE) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \


@ -1,6 +1,6 @@
#include "Exception.h"
#include <string.h>
#include <cstring>
#include <cxxabi.h>
#include <cstdlib>
#include <Poco/String.h>


@ -1,7 +1,7 @@
#include <Common/IO.h>
#include <unistd.h>
#include <errno.h>
#include <cerrno>
#include <cstring>
bool writeRetry(int fd, const char * data, size_t size)


@ -20,10 +20,12 @@ struct OvercommitRatio
friend bool operator<(OvercommitRatio const & lhs, OvercommitRatio const & rhs) noexcept
{
Int128 lhs_committed = lhs.committed, lhs_soft_limit = lhs.soft_limit;
Int128 rhs_committed = rhs.committed, rhs_soft_limit = rhs.soft_limit;
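/// The Int128 copies keep the cross-multiplications below from overflowing 64-bit
/// arithmetic when both `committed` and `soft_limit` are large.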
// (a / b < c / d) <=> (a * d < c * b)
return (lhs.committed * rhs.soft_limit) < (rhs.committed * lhs.soft_limit)
|| (lhs.soft_limit == 0 && rhs.soft_limit > 0)
|| (lhs.committed == 0 && rhs.committed == 0 && lhs.soft_limit > rhs.soft_limit);
return (lhs_committed * rhs_soft_limit) < (rhs_committed * lhs_soft_limit)
|| (lhs_soft_limit == 0 && rhs_soft_limit > 0)
|| (lhs_committed == 0 && rhs_committed == 0 && lhs_soft_limit > rhs_soft_limit);
}
// actual query memory usage


@ -3,7 +3,7 @@
#include <fcntl.h>
#include <dlfcn.h>
#include <unistd.h>
#include <time.h>
#include <ctime>
#include <csignal>
#include <Common/logger_useful.h>


@ -2,7 +2,7 @@
#include <sys/file.h>
#include <fcntl.h>
#include <errno.h>
#include <cerrno>
#include <Common/logger_useful.h>
#include <base/errnoToString.h>


@ -93,6 +93,13 @@ String toString(TargetArch arch);
#define USE_MULTITARGET_CODE 1
#if defined(__clang__)
#define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f")))
#define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2")))
#define AVX_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx")))
#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt")))
#define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE
# define BEGIN_AVX512F_SPECIFIC_CODE \
_Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f\"))),apply_to=function)")
# define BEGIN_AVX2_SPECIFIC_CODE \
@ -109,6 +116,13 @@ String toString(TargetArch arch);
*/
# define DUMMY_FUNCTION_DEFINITION [[maybe_unused]] void _dummy_function_definition();
#else
#define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native")))
#define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native")))
#define AVX_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,tune=native")))
#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,tune=native")))
#define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE
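/// Unlike the clang branch above, GCC accepts tune=native inside target(), so these
/// per-arch attributes also request tuning for the build machine.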
# define BEGIN_AVX512F_SPECIFIC_CODE \
_Pragma("GCC push_options") \
_Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native\")")
@ -212,4 +226,74 @@ DECLARE_AVX512F_SPECIFIC_CODE(
constexpr auto BuildArch = TargetArch::AVX512F; /// NOLINT
) // DECLARE_AVX512F_SPECIFIC_CODE
/** Runtime Dispatch helpers for class members.
*
* Example of usage:
*
* class TestClass
* {
* public:
* MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(testFunctionImpl,
* MULTITARGET_FH(int), /\*testFunction*\/ MULTITARGET_FB((int value)
* {
* return value;
* })
* )
*
* void testFunction(int value) {
* if (isArchSupported(TargetArch::AVX2))
* {
* testFunctionImplAVX2(value);
* }
* else if (isArchSupported(TargetArch::SSE42))
* {
* testFunctionImplSSE42(value);
* }
* else
* {
* testFunctionImpl(value);
* }
* }
*};
*
*/
/// Function header
#define MULTITARGET_FH(...) __VA_ARGS__
/// Function body
#define MULTITARGET_FB(...) __VA_ARGS__
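/// FH/FB are variadic on purpose: commas inside the function header or body (template
/// argument lists, initializer lists, ...) would otherwise split the arguments of the
/// outer wrapper macro.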
#if ENABLE_MULTITARGET_CODE && defined(__GNUC__) && defined(__x86_64__)
/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(name, FUNCTION_HEADER, FUNCTION_BODY) \
FUNCTION_HEADER \
\
AVX2_FUNCTION_SPECIFIC_ATTRIBUTE \
name##AVX2 \
FUNCTION_BODY \
\
FUNCTION_HEADER \
\
SSE42_FUNCTION_SPECIFIC_ATTRIBUTE \
name##SSE42 \
FUNCTION_BODY \
\
FUNCTION_HEADER \
\
name \
FUNCTION_BODY
#else
/// NOLINTNEXTLINE
#define MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(name, FUNCTION_HEADER, FUNCTION_BODY) \
FUNCTION_HEADER \
\
name \
FUNCTION_BODY
#endif
}


@ -9,10 +9,10 @@
#include "hasLinuxCapability.h"
#include <base/unaligned.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/socket.h>
#include <linux/genetlink.h>
#include <linux/netlink.h>


@ -1,4 +1,4 @@
#include <signal.h>
#include <csignal>
#include <sys/time.h>
#if defined(OS_LINUX)
# include <sys/sysinfo.h>


@ -1,4 +1,4 @@
#include <string.h>
#include <cstring>
#include <string_view>
#include <Common/clearPasswordFromCommandLine.h>


@ -1,6 +1,6 @@
#include <Common/createHardLink.h>
#include <Common/Exception.h>
#include <errno.h>
#include <cerrno>
#include <unistd.h>
#include <sys/stat.h>


@ -1,5 +1,5 @@
#if defined(OS_LINUX)
#include <stdlib.h>
#include <cstdlib>
/// Interposing these symbols explicitly. The idea works like this: malloc.cpp compiles to a
/// dedicated object (namely clickhouse_malloc.o), and it will show earlier in the link command


@ -1,4 +1,4 @@
#include <time.h>
#include <ctime>
#include <unistd.h>
#include <sys/types.h>
#include <Common/Exception.h>


@ -1,5 +1,5 @@
#include <city.h>
#include <string.h>
#include <cstring>
#include <base/unaligned.h>
#include <base/types.h>

Some files were not shown because too many files have changed in this diff.